Updating prebuilts and/or headers

c2e810fc3453d74ee0493168dbf7981ba482acd3 - NVIDIA-kernel-module-source-TempVersion/SECURITY.md
7d577fdb9594ae572ff38fdda682a4796ab832ca - NVIDIA-kernel-module-source-TempVersion/COPYING
12f1806bdc25917299525e0e48815306159de132 - NVIDIA-kernel-module-source-TempVersion/Makefile
845f84d973e2d7122831bc1f118f27145c691080 - NVIDIA-kernel-module-source-TempVersion/README.md
4f4410c3c8db46e5a98d7a35f7d909a49de6cb43 - NVIDIA-kernel-module-source-TempVersion/kernel-open/Makefile
d8d7c839f0517ae8092f9c0679d5ca05f03ec741 - NVIDIA-kernel-module-source-TempVersion/kernel-open/conftest.sh
fb6731582ade01ed43aab7b0ad2907736547ee11 - NVIDIA-kernel-module-source-TempVersion/kernel-open/Kbuild
0b1508742a1c5a04b6c3a4be1b48b506f4180848 - NVIDIA-kernel-module-source-TempVersion/kernel-open/dkms.conf
1d17329caf26cdf931122b3c3b7edf4932f43c38 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-msi.h
88399279bd5e31b6e77cb32c7ef6220ce529526b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hypervisor.h
60ef64c0f15526ae2d786e5cec07f28570f0663b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/conftest.h
ea98628370602119afb1a065ff954784757ddb10 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_dsi_panel_props.h
c06b2748cd7c8f86b5864d5e9abe6ecf0ab622f0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-hash.h
423282211355a8cb20bff268166885ac90e2986c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_interface.h
c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-memdbg.h
35da37c070544f565d0f1de82abc7569b5df06af - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_firmware_types.h
82940edf4650b9be67275d3a360ef4e63387a0a7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/cpuopsys.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numbers.h
4b7414705ce10f0a1e312c36a43824b59d572661 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvmisc.h
e4a4f57abb8769d204468b2f5000c81f5ea7c92f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs.h
9c4a7224553926aac9af460ae4e008bb7d023add - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-proto.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvgputypes.h
e20882a9b14f2bf887e7465d3f238e5ac17bc2f5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_speculation_barrier.h
5c4c05e5a638888babb5a8af2f0a61c94ecd150b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-format.h
b4c5d759f035b540648117b1bff6b1701476a398 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvCpuUuid.h
880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci-types.h
c45b2faf17ca2a205c56daa11e3cb9d864be2238 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-modeset-interface.h
349696856890bdbe76f457376648522b35f874ef - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvimpshared.h
003b2cbe3d82e467c09371aee86e48d65ae6c29b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl-numa.h
b642fb649ce2ba17f37c8aa73f61b38f99a74986 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-retpoline.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kernel-interface-api.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvi2c.h
b02c378ac0521c380fc2403f0520949f785b1db6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-dmabuf.h
3100c536eb4c81ae913b92d4bc5905e752301311 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os-interface.h
143051f69a53db0e7c5d2f846a9c14d666e264b4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kref.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-gpu-info.h
7b2e2e6ff278acddc6980b330f68e374f38e0a6c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-timer.h
fdbaee144adb26c00776b802560e15f775ed5aef - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-mm.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_stdarg.h
80fcb510fad25cb7a017139f487da1843b7cfcbd - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-lock.h
59d537c1d1b284a9d52277aff87c237e3ec2c99d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-procfs-utils.h
e3362c33fe6c7cdec013eceac31e8f6f38dc465f - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv_uvm_types.h
5d8de06378994201e91c2179d149c0edcd694900 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatuscodes.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvstatus.h
4750735d6f3b334499c81d499a06a654a052713d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-caps.h
009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-kapi.h
2473d97c29e22920af1cf15b845287f24e78cdda - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-linux.h
4b1a6c372a531b0d3e0a4e9815dde74cb222447c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/rm-gpu-ops.h
94ad0ba9fd6eb21445baec4fddd7c67a30cceefa - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pci.h
f3e0f71abf34300d322e313adcd4fcbde9aa6f87 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-kthread-q.h
256b5dc6f28738b3ce656c984f01d8f3e13e9faa - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-pgprot.h
c57259130166701bf6d5e5bb1968397716d29fc0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-platform.h
84e9b6cba7ba26ef4032666f769c5b43fa510aad - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-list-helpers.h
53ceca28c6a6da14ef62a4c57545089c48e6b2be - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv.h
910255a4d92e002463175a28e38c3f24716fb654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvkms-api-types.h
42ece56d0459eb9f27b2497de48f08360c4f7f6b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvlimits.h
4a8b7f3cc65fa530670f510796bef51cf8c4bb6b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-register-module.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/dce_rm_client_ipc.h
906329ae5773732896e6fe94948f7674d0b04c17 - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os_gpio.h
2f5fec803685c61c13f7955baaed056b5524652c - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-ioctl.h
d25291d32caef187daf3589ce4976e4fa6bec70d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nv-time.h
8c041edbf4ed4fefdfd8006252cf542e34aa617b - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/nvtypes.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - NVIDIA-kernel-module-source-TempVersion/kernel-open/common/inc/os/nv_memory_type.h
2ea1436104463c5e3d177e8574c3b4298976d37e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms-ioctl.h
17855f638fd09abfec7d188e49b396793a9f6106 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvkms.h
8bcd1ca9c55362c03a435e226b05796be8c92226 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-linux.c
0b7e063481a0e195c6e91a4d3464c4792c684f03 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nv-kthread-q.c
07a2d5fa54ff88a0cb30c0945ef3c33ca630a490 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
8a935bdda64e1d701279ef742b973c5dbed5727b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
8bedc7374d7a43250e49fb09139c511b489d45e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.h
9a0f445fda73c69e1bee7f6b121cbed33fcb01bf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-mmap.c
5f2dafa23c74ba7b04aaf43ef5808457ba9be2fa - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv.c
95ae148b016e4111122c2d9f8f004b53e78998f3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-memdbg.c
9fb0f406f9a5af431f1b72c9c4395b4933dbcf58 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia.Kbuild
3ee953312a6a246d65520fc4a65407f448d1d2b8 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-gpio.c
cded6e9b6324fd429b865173596c8e549a682bba - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_uvm_interface.c
9f2298f179ad00f1a914b26b274eb2a68068eece - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-i2c.c
c1ebcfec42f7898dd9d909eacd439d288b80523f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-mlock.c
d11ab03a617b29efcf00f85e24ebce60f91cf82c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-backlight.c
dc39c4ee87f4dc5f5ccc179a98e07ddb82bb8bce - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-modeset-interface.c
93511db745073b4a906fe28bea03c3b3d76d4df4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-host1x.c
06e7ec77cd21c43f900984553a4960064753e444 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform-pm.c
335e7a5c99c7e8412a425adb82834234cd76b985 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-interface.c
cd7e12552cb5249e5c23147d5cc924681c691e8a - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-platform.c
805042e7cdb9663a0d3ca3064baeec8aa8eb3688 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.c
c7f1aaa6a5f3a3cdf1e5f80adf40b3c9f185fb94 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.c
0b0ec8d75dfece909db55136731196162c4152d5 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dmabuf.c
84d84563c003d3f568068e7322ce314387a6f579 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-caps.c
94c406f36836c3396b0ca08b4ff71496666b9c43 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-usermap.c
fbae5663e3c278d8206d07ec6446ca4c2781795f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ibmnpu.h
2c0d17f9babe897435c7dfa43adb96020f45da2b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dsi-parse-panel-props.c
9b701fe42a0e87d62c58b15c553086a608e89f7b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.h
0ce95e5ed52d6d6ca2bb6aac33ca8f197145ec45 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs-utils.c
cf90d9ea3abced81d182ab3c4161e1b5d3ad280d - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.h
02b1936dd9a9e30141245209d79b8304b7f12eb9 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-cray.c
5ad9d39b1dde261b61908fa039ca1b60aae46589 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.c
218aac0c408be15523a2d0b70fdbdadd7e1a2e48 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-imp.c
6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/rmp2pdefines.h
5ac10d9b20ccd37e1e24d4a81b8ac8f83db981e4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vtophys.c
cbfee8ea704ceb9f223e4f32c57e515350b8d9fd - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-dma.c
fc566df59becef7bc7511ae62a9a97b1532a5af2 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-frontend.c
b71bf4426322ab59e78e2a1500509a5f4b2b71ab - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.h
a3626bf1b80a81c14408c5181e8bd27696df2caf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci.c
98c1be29932b843453567d4ada2f9912ea4523d7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-vm.c
0b7e063481a0e195c6e91a4d3464c4792c684f03 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-kthread-q.c
61eadfa0f5b44a3d95e4d2d42d79321fc909c661 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-clk.c
4eee7319202366822e17d29ecec9f662c075e7ac - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-rsync.c
786a71433ddc0411783cb71d4062939981c7db1f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-acpi.c
64f1c96761f6d9e7e02ab049dd0c810196568036 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pat.c
d844fcaa5b02f1d1a753965a336287148b2ce689 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-p2p.h
dc165103f9196f5f9e97433ec32ef6dded86d4bb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-registry.c
68d781e929d103e6fa55fa92b5d4f933fbfb6526 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-report-err.h
978d00b0d319c5ad5c0d3732b0e44f4ac0ac9a4c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv_gpu_ops.h
fbfa2125b2bac1953af6d6fd99352898e516a686 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-msi.c
027fd0ab218eb98abe2b66d05f10b14ebb57e7a3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-nano-timer.c
07f95171c241880c472a630d1ee38fb222be4d59 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nvidia-sources.Kbuild
a392fa800565c8345b07af5132db7078b914d59f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/os-pci.c
ee894ec530acbd765c04aec93c1c312d42210aeb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-ipc-soc.c
f179d308e984ff44a82f6e1c6007624f1ac916ba - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-procfs.c
50c54c3fced0934d04ef66231cc4420f6a0dda6c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-reg.h
7ac10bc4b3b1c5a261388c3f5f9ce0e9b35d7b44 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-usermap.c
d9221522e02e18b037b8929fbc075dc3c1e58654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia/nv-pci-table.c
8bedc7374d7a43250e49fb09139c511b489d45e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.h
eca70b3b8146903ec678a60eebb0462e6ccf4569 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.h
54cd87e7f8eca85599aad4fcf70573f6361c4332 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.Kbuild
e4bb0073eb9d6f965923bb9874e4714518850a27 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.h
99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.h
8b2063f0cc2e328f4f986c2ce556cfb626c89810 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.c
6528efa1f8061678b8543c5c0be8761cab860858 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.h
ab63f2a971db8bf10585b1a05fe0e3ca180ad6c7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-os-interface.h
40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-utils.h
f927e6af2d72cf389851b558a0b1400e0f1cec7c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.c
8c95aa7ab01dd928974ce7880a532557209bd8e0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.h
b0db208983d1f403fad72067d5557a0c40410fc1 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-connector.c
23586447526d9ffedd7878b6cf5ba00139fadb5e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
cbcd6e13d84ea6b52db12eda98be38e321888eb0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h
a7bc26c1078e95f9ff49c164f3652787adf1fef3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-modeset.c
3703b18511fc6e6eec502ba25c961b8026ab064b - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.c
c8982ace6fc79f75c092662902c0c61371195f0c - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-linux.c
66b33e4ac9abe09835635f6776c1222deefad741 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.h
6d65ea9f067e09831a8196022bfe00a145bec270 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
45ec9fd1abfe9a0c7f9ffaf665014cec89c9e7e6 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-crtc.h
7129c765da5bfb77788441fed39b46dc7dc0fa8e - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
59bb05ef214b5c5f2fe3cf70142dabd47ea70650 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-ioctl.h
6ed7d41b0740987793f6c07d472893af308cfa0f - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c
044071d60c8cc8ea66c6caaf1b70fe01c4081ad3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-conftest.h
708d02c8bcdfb12e4d55896e667821357c8251ec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-priv.h
dc0fe38909e2f38e919495b7b4f21652a035a3ee - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm.c
e4efab24f90d397c270568abb337ab815a447fec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h
b775af5899366845f9b87393d17a0ab0f1f6a725 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem.c
1e05d0ff4e51a10fa3fcd6519dc915bf13aa69c0 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-helper.h
dd478f7ddb2875fc9ff608858df8d24a62f00152 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
892cac6dd51ccfde68b3c29a5676504f93ee8cd7 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.c
355126d65ea1472ce3b278066811d4fb764354ec - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
5209eba37913f5d621a13091783622759706e6e3 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-fb.c
e362c64aa67b47becdbf5c8ba2a245e135adeedf - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
9a882b31b2acc9e1ad3909c0061eee536e648aae - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.h
170fc390de57f4dd92cf5005a8feabc4e90462d2 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-drv.c
97b6c56b1407de976898e0a8b5a8f38a5211f8bb - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-format.h
d862cc13c29bbce52f6b380b7a0a45a07fe9cbac - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-encoder.c
c294224282118c70cd546ae024a95479ad9b1de4 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
d9221522e02e18b037b8929fbc075dc3c1e58654 - NVIDIA-kernel-module-source-TempVersion/kernel-open/nvidia-drm/nv-pci-table.c
bda08c8398f68ffc2866ebc390dc63a09a16b0b9 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/unix_rm_handle.c
e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_mode_timings_utils.c
5ef40af650eb65b2c87572a1bbfe655d8821f2d5 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_memory_tracker.c
26f2a36442266c5d2664d509ecfd31094a83e152 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/nv_vasprintf.c
9e008270f277e243f9167ab50401602378a2a6e8 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_vasprintf.h
8d9c4d69394b23d689a4aa6727eb3da1d383765a - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/unix_rm_handle.h
07c675d22c4f0f4be6647b65b6487e2d6927c347 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_memory_tracker.h
667b361db93e35d12d979c47e4d7a68be9aa93b6 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/utils/interface/nv_mode_timings_utils.h
881cbcc7ed39ea9198279136205dbe40142be35e - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_assert.h
1c947cfc8a133b00727104684764e5bb900c9d28 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_mode_timings.h
83044eb5259200922f78ad3248fbc1d4de1ec098 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_common_utils.h
2476f128437c0520204e13a4ddd2239ff3f40c21 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv-float.h
a8e49041c1b95431e604852ad0fa3612548e3c82 - NVIDIA-kernel-module-source-TempVersion/src/common/unix/common/inc/nv_dpy_id.h
e3be7ba45506c42d2fca87e9da45db75ced750ca - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_common.h
f669280a5e86ba51b691e2609fa7d8c223bd85dc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C671.c
7c2fe72426fa304315e169e91dc6c1c58b5422fd - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_0073.c
381e1b8aeaa8bd586c51db1f9b37d3634285c16a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_class.h
67db549636b67a32d646fb7fc6c8db2f13689ecc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9271.c
5e12a290fc91202e4ba9e823b6d8457594ed72d3 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
d2c79c8a4e914519d653d1f14f706ec4a1f787e8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9171.c
15d54c86d78404639c7f151adc672e19472dcf4a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.c
9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9571.c
54a1b5e5aaf0848a72befc896ed12f1de433ad4f - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_9471.c
443c0a4b17a0019e4de3032c93c5cac258529f01 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_internal.h
e6d500269128cbd93790fe68fbcad5ba45c2ba7d - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt_C371.c
90e8ce7638a28cd781b5d30df565116dc1cea9e8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/hdmipacket/nvhdmipkt.h
f75b1d98895bdccda0db2d8dd8feba53b88180c5 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid.h
65f2503bea8aa1847948cc0d628493e89775c4f3 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_displayid20.c
28d7b753825d5f4a9402aff14488c125453e95c5 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_tv.c
b4813a5e854e75fb38f460e0c27dca8e1ce8dc21 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edid.c
1290abde75d218ae24f930c3b011042a3f360c2e - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/displayid20.h
4a2ad30f49ed92694b717a99ce7adeeb565e8a37 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_861.c
439ef00ffa340bd1b6506970d154a33ca4b64b4a - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dmt.c
cfaa569ac3d63484c86e8a8d7a483dd849f96be8 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid20.c
1997adbf2f6f5be7eb6c7a88e6660391a85d891b - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_gtf.c
49df9034c1634d0a9588e5588efa832a71750a37 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_cvt.c
58b68f1272b069bb7819cbe86fd9e19d8acd0571 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/edid.h
890d8c2898a3277b0fed360301c2dc2688724f47 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_util.c
3023a58fd19d32280607d4027b09fe51fdb7a096 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.h
e66a20fc1579b0dd1392033089f97cf170e8cf10 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/dpsdp.h
b5bd3a58b499216e4fe0e0c9c99525b07ac237dc - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_dsc_pps.c
f531475d8b978bca5b79d39d729b0c9986fe7b36 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming.h
95dae946088f21339299dae48eeafaab31b97b05 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvtiming_pvt.h
0a04709ebdc4acb12038656c433e10c4e7096518 - NVIDIA-kernel-module-source-TempVersion/src/common/modeset/timing/nvt_edidext_displayid.c
1ff879eca2a273293b5cd6048419b2d2d8063b93 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mulAdd.c
1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_rem.c
c3ce12c227d25bc0de48fbcf914fc208e2448741 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sub.c
fb062ecbe62a1f5878fd47f0c61490f2bde279dd - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI32.c
38bd00e9c4d2f1354c611404cca6209a6c417669 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros64.c
0e9694d551848d88531f5461a9b3b91611652e9a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32_r_minMag.c
9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f64.c
23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32_r_minMag.c
5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64_r_minMag.c
09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF32.c
00c612847b3bd227a006a4a2697df85866b80315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF32.c
29321080baa7eab86947ac825561fdcff54a0e43 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f32.c
2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f32.c
ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mulAdd.c
2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64.c
daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_rem.c
da3b3f94a817909a3dc93ca5fa7675805c7979e0 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_isSignalingNaN.c
bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_f32.c
dafa667ee5dd52c97fc0c3b7144f6b619406c225 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mulAddF64.c
2960704c290f29aae36b8fe006884d5c4abcabb4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_div.c
d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32.c
0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToI64.c
fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_shiftRightJam128.c
9a5b93459ace2da23964da98617d6b18006fab86 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_countLeadingZeros8.c
ae25eea499b3ea5bdd96c905fd0542da11083048 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF64.c
729e790328168c64d65a1355e990274c249bbb3a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i32_r_minMag.c
296c40b0589536cb9af3231ad3dcd7f2baaa6887 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt.c
5c1026617c588bcf5f1e59230bd5bb900600b9ac - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_mul.c
4b37be398b3e73ae59245f03b2ba2394fc902b4d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF64Sig.c
69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui32.c
d0f8f08c225b60d88b6358d344404ba9df3038ec - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normSubnormalF32Sig.c
c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF64.c
dde685423af544e5359efdb51b4bf9457c67fa3b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sqrt.c
577821f706c7de4ca327c1e2fcc34161c96c89f3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64_r_minMag.c
5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_isSignalingNaN.c
84b0a01ba2a667eb28b166d45bd91352ead83e69 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i64_to_f32.c
513a7d1c3053fc119efcd8ae1bcc9652edc45315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt.c
4445b1fbbd507144f038fd939311ff95bc2cf5f1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui64_to_f64.c
b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI64.c
ab19c6b50c40b8089cb915226d4553d1aa902b0e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32_r_minMag.c
7bc81f5bc894118c08bfd52b59e010bc068ed762 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f32.c
7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/softfloat_state.c
ec1a797b11f6e846928a4a49a8756f288bda1dfa - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/i32_to_f64.c
86fdc2472526375539216461732d1db6a9f85b55 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF32.c
b22876b0695f58ee56143c9f461f1dde32fefbf3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_ui64.c
d701741d8d6a92bb890e53deda1b795f5787f465 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le.c
baa7af4eea226140c26ffe6ab02a863d07f729fb - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq_signaling.c
ce37cdce572a3b02d42120e81c4969b39d1a67b6 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i32.c
0108fe6f0d394ad72083aff9bb58507f97a0b669 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/ui32_to_f64.c
b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le.c
54cbeb5872a86e822bda852ec15d3dcdad4511ce - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_add.c
c29536f617d71fe30accac44b2f1df61c98a97dc - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_div.c
871cb1a4037d7b4e73cb20ad18390736eea7ae36 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui64_r_minMag.c
21a6232d93734b01692689258a3fdfbbf4ff089d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundToUI32.c
760fd7c257a1f915b61a1089b2acb143c18a082e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_addMagsF64.c
5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_mul64To128.c
0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq.c
29396b7c23941024a59d5ea06698d2fbc7e1a6ca - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_to_i64.c
108eec2abf1cddb397ce9f652465c2e52f7c143b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_roundToInt.c
fe06512577e642b09196d46430d038d027491e9f - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_eq_signaling.c
d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64_r_minMag.c
1484fc96d7731695bda674e99947280a86990997 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_i64.c
8e58f0258218475616ff4e6317516d40ad475626 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_lt_quiet.c
6fa7493285fe2f7fdc0ac056a6367e90327905c2 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_sub.c
aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_le_quiet.c
bbc70102b30f152a560eb98e7a1a4b11b9ede85e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_sqrt.c
e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_roundToInt.c
50b3147f8413f0595a4c3d6e6eeab84c1ffecada - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_normRoundPackToF32.c
50daf9186bc5d0180d1453c957164b136d5ffc89 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_eq.c
6f83fa864007e8227ae09bb36a7fdc18832d4445 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_mul.c
a94c8c2bd74633027e52e96f41d24714d8081eb4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
e7890082ce426d88b4ec93893da32e306478c0d1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_approxRecipSqrt32_1.c
2db07bbb8242bc55a24ef483af6d648db0660de0 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_add.c
9266c83f3e50093cc45d7be6ab993a0e72af1685 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF64.c
00ab2120f71117161d4f6daaa9b90a3036a99841 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_ui32.c
824383b03952c611154bea0a862da2b9e2a43827 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_subMagsF32.c
68843a93e1f46195243ef1164f611b759cf19d17 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_le_quiet.c
e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f64.c
054b23a974fc8d0bab232be433c4e516e6c1250a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f64_lt_quiet.c
0d8e42636a3409a647291fdb388001c2b11bba07 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/f32_to_f16.c
d9a86343e6cc75714f65f690082dd4b0ba724be9 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/s_roundPackToF16.c
1dd1b424087d9c872684df0c1b4063b077992d5f - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
86cda6550cb02bbf595d1667573e4be83702a95e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/specialize.h
21a11759ed2afd746a47c4d78b67640c2d052165 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
252c816378fddab616b1f2a61e9fedd549224483 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
d152bc457b655725185bdff42b36bb96d6e6715e - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
0cbae7a5abc336331d460cbd3640d2cda02af434 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
1ded4df85ff5fa904fa54c27d681265425be1658 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitiveTypes.h
f36c896cfa01f1de9f9420189319e4e00c7fc52a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/internals.h
9645e179cf888bcd0e3836e8126b204b4b42b315 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat.h
de09949a0ca5cd2a84b882b5b5c874d01d3ae11a - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/primitives.h
4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/source/include/softfloat_types.h
b882497ae393bf66a728dae395b64ac53602a1a5 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/nv-softfloat.h
be9407a273620c0ba619b53ed72d59d52620c3e4 - NVIDIA-kernel-module-source-TempVersion/src/common/softfloat/nvidia/platform.h
91e9bc3214d6bb9b20bc8001d85fe8699df5184a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvos.h
88399279bd5e31b6e77cb32c7ef6220ce529526b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-hypervisor.h
f28f98589e65b71e47dbcb2c4230538ae0545e75 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/cpuopsys.h
4b7414705ce10f0a1e312c36a43824b59d572661 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvmisc.h
af0bc90b3ad4767de53b8ff91e246fdab0146e8b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvsecurityinfo.h
a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvcfg_sdk.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvgputypes.h
ae60d53603c7ddbbd72d4e16ce2951f3d42aed32 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nverror.h
a31b82c454df785a1d7893af38e83443cfe6f2fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvdisptypes.h
ffa91e1110a5cc286ec44a7bda5461b2be941ea2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_vgpu_types.h
9bca638f5832d831880f090c583fac6fc8cf6ee6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/dpringbuffertypes.h
821a01976045d7c3d2ac35b0f115e90a9e95f8e8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvimpshared.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvi2c.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nv_stdarg.h
5d8de06378994201e91c2179d149c0edcd694900 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatuscodes.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvstatus.h
50d31a6d133b0ea9230f9dc1b701ce16a88a7935 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/rs_access.h
eb42327a2b948b79edc04d9145c7aa5b2a2b420e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvlimits.h
9f2e225f027f5a04d1104d29a0039cd2bb7dd85a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvfixedtypes.h
a9bf4969ae3e39cc315b6180ee7055e0ad1279c6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/nvtypes.h
00e9a0ace4b59958a8b048229fb22b4d9e2f8864 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
3449834cb8b8c630ab1de6df30503c846b26e86b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
f779cd0470e428160fc590b590f2cd4855950058 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
7c4aef225d174ecbe1130d63b8e8ff752bddf48e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
5abe75cf18a2fede23529194b406c3cf742edced - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
c8490da9f200f4dbbac7ebe636f3a83485f3001c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
1022bba330a71b92dcc81f47ba460209fcc70cd0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
b72318d58806bfd25f922107a606b222baa2e28c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h
7a0c878431a9b0d9dda117f165946b1cdf8ebbde - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
e2d8133537e2687df022c6a966c55fbfea1974f3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
9c6a4f1d864b5161564869b19f8cb2ce9d629c1d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
0639d6cd553994aff4195e8e7547eebf8e713145 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
79204c26eb58ee812cc2f72ee1f6d4d7d93817c7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
ea9aac6f0e23f0de444ac3919c35e4b78c18c942 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
f7435e356d54d682a949734574388abbe7ffe1d0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
64f849ed19609320461b8938f24f0b40fb1a35b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
d107e41878b5bc50a5c8b29684122c9589625a6f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
f4a4eeb35e15e0642d1bf4e2e5b31394f4cbbfa1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
b7b0360b1a6ca78267fa10f7adcd370da86513c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
862a17958488d69ca3e92c42ee1bed55cb299fa4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
bb4182eeea20779f62165d2d50ed209b6a07e54e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
b7f2957f506dc285acb87d41d34cfd60408b00ae - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h
c72f147e8fb78126d13567278239acfcd9b9cc1f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
8dd5acedc0b1613314eb3fe9130a9c282bd49ca1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h
681c94b982e29049638814f6c1e4eb508f8b0bf3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
3646710984d5c3024d16f9ab346222ad6dfdb4f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
6c34803c213ea0a28114bc921e1867cefebec088 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
76c9f104e04a8fd9e73e03ad59b2e72264c5f169 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
9e61da81ecdff15d63f9ae8a1c2f0960b820c65c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
dac18fcaf5d652b21f84cfba455f4f5972e786c5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
d51e47795dfe1fc0bae31b9379d6a39ac4d3080f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
8a613db1c31724a577c4718752c15d9754882f48 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h
3966d65c9701bf97c807cf87838a08cda10f418d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h
a1830232f18afe44230d6a8598c50b3fc7656089 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h
2dd40e3e41d74de3865bc700acc9ab7e0540c647 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h
f97e7f88aa17788bbbebf55807e449c0ee016384 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h
b2b6b3b413ae17af1afde2fc8672cd1bf48e7b19 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
3c7130d0613d3c8baef6b23bb63c6ee7a10ed21b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
39f5e838aa6ab007c56e7a59c7d2986d1a7aa34a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
6679d97e3852ed78ee44780408c523b94f426ca4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
090f908931690302e3a2c77f3ce41c4de0c61efc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h
7c4e426dee0ae86c00b3bd10873a1a2bd94ed3b2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h
5bdddb9a949a78443f83a7da81ad5fee8a300c44 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
d084d99035f4cc34cd803ff4a5328b9e10ea77fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
4b8fa2ce546ae3f06b7dc61df3d534449cdb5b2d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
8855ee8bad2f2169ebd147e7ac77d9f1340cbad8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h
82a2e7a2fc6501163d07870f3f640a591f4a8996 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
f3a855fe7a91c2acf2be41629ce906996e01a9fc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h
3d8e37aa8485aadf55335d8f9f913273d90a2442 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h
da220a5608a0e4c73fa0315b13e2b29d92b114e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
6834a9c75265c25adfb03f0b2dbfe0559f28cadf - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
051dbfd1d5ff02b2771bc9b3fad8aaef29aab9ae - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h
c3a75647f5ca6cd7b456511af36a9de6d90329c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
82364e263f43ea028c2d66db58887958bdef64b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
143c1c24ec926142d1f84dec7a543f2b98541545 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h
1684a3a8111fd3d83363cebe68d016a54eaaf686 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
72292c9844eaf24c38967dd4a879c0c0f070a0de - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
091f7bac99f5c786a64b6fa59d9d27af786bab10 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h
c0181e959c1ba5ebfc3f130c8764687b58453f9b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
2a11fc0a499f8293b83e08572f5e6be04bd1da61 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
a44d2f1b31b8ec124355018204909df19df09748 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h
8ef946f1d7545277ef64891b45a29db44c4e9913 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
774fd1e730d1d853bf97946f7ecd24c6648c7af4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
22d828c87b223f937c589a0e863a25d95b734371 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
7d3819683e9f562a87f36a3e23c043b2b6fd814e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h
7d27fafff043d290b2ec1d2dddbecea2f1df4704 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
27ad8b5c2406fcd572cd098dd215e93ae1db99e3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
783db6da0b92b6b8ae26b180129beb0bccb13a5b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h
e6f6beaed64167088608027b442f5449cff027c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h
6b4418e269bb97b9996b05ea153ccd195c661e11 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
0ac7e4eb4d952c84c6f4e697cbfcb355069377c2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
1651ec548a2899391a05bc6463b3f7162c7807ab - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
bc22bf13b7d99ee6f80c30b569e084a2b03e385a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
1ebfe9f0f9a7d2dd2873df82bbc78b1ec982ca93 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h
291f91212d5a37aae46a2944cf89f4b74b1d1809 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
82aa4d6108ce6abebcbbc95afcb7a6350e287f5f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
c4474dc1f53661c67d8fce5303dcc636d9ad3b8f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
18814de559257f07bad8a0a9006ac9751fcfa1cb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
e9d692b06c70951dbbd0663a89f822153bce1146 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
1248e113751f8ed9e4111e86a7f7fb632b102eca - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
b921747a65c67fa093de08fa782c164d048824b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
7e0773f7bf13350a9fd25b0df4d6c45a55a008df - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h
8fd661537cc4eb55c167b9daae404bfb82408bfe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
f88f1c519a242dfa71221bdcdafc7deab14d8503 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
ccc48726d7da49cddc4d4f86d8dbd2ad585f7b38 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
3dc187adc0a848e68f62a6a7eb99ac02ee6502cc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h
f3b81a241efe1224798b17c062e33936469c3c2b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h
09dedebdcff3244ab8f607a7152e9116d821f9c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
440314f66374d35a1628ee8bd61836a80ab421eb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
92be535d68a7f18088921faa3f1742298ad341c3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h
84fb76f9cff38c797b139cba40175717591d49df - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h
2f92bebccb9da5246b19bd13ff0e6e79de79bc3b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h
aec1b750866e34f9626e48c535336f93c5c246fa - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h
9031642283b59ee6d52e2e1ca54332df5c2f7acc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
e10cbe4875736ef16072232789dd3f48647c022f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
91cccede5c4f26a6b6ca7ba4bc292f3d908a88d4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
f47136417885a729f9c5dee375ec9dec1bd170e0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
f523fe4a55a6a9d01f41f9f34ff149ed75b2e739 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h
ad7604ced12ee18c569d2a7ebe71e185ebff3fd4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h
209ef519cb73395cea7d66016448ebc3c6bf6fe4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
4a3e7d71b9169d703d9373ff80b02a63825a80e4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
4d9116d23d27a3fc39c366f2685243b83ef7d485 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
abe79ad927e7c70b7c1a8eb687052a782efcd5f4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
ef180860a1ccbcb9f5d2f8a6656a345eef76a2a7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
f7e56d494fea02515180f21b0f56ae0aff583be4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
b66a45c83c84f6d458ef19fd7e0f972f2eabd109 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
2518a62952c72ee6f3447bc8dc417129f6ac26a4 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
9373c51ca29afec3368fb5b8c2a2f05b0920f291 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
0ee647b929e55cf39da7e26ffc0f027676fa52fa - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
6e5b278451308efbb6911a8ab03b0feba504d035 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
c905766589d17fcb99a5d73846ed61f7b7db56fe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
323fcc6af8c30d5ef292ae90810c5c2fa2009e20 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
382dc80790d870047db7cea957ef208d4439801e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h
825f4d976c76d375803e42967fdab53e7814d18d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
8294d43d202a9cd78367f2e69388a6c6f2c369f7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
cf78a847e0882e1d164eccdb86ea033126019599 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
76c31150e2f589fbb96cfc06cdc6c1801e128656 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h
7f5548026751a8caaebc245945ccdc4bb037b566 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h
7812ba094d95c1b6d65afc6a1d26930400b8b96f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
f1dae17e75a24c28135cf073bf29f9609a2418e3 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h
24782552a13f627e2e94ebb5f7021246a0c0dc53 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
127f78d2bb92ef3f74effd00c2c67cf7db5382fe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67d.h
bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0040.h
4a6444c347825e06bdd62401120553469f79c188 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
2f87e87bcf9f38017ad84417d332a6aa7022c88f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9471.h
0d8975eec1e3222694e98eb69ddb2c01accf1ba6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000_notification.h
c2600834921f8a6aad6a0404076fa76f9bc1c04d - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37b.h
861b9d7581eab4a2b8cc7269b5d0e0d1294048d1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005.h
92c2dab6bc48f32f46c6bbc282c63cb4ec7a50bf - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9170.h
0285aed652c6aedd392092cdf2c7b28fde13a263 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00fc.h
dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0076.h
a30755b3003023c093f8724cf9a2e0b0c301b586 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9010.h
cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57e.h
bb8d15aee43e1feb76fddf80398e93fd805f1ddb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2082.h
02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57esw.h
ccefba28a2c7979701f963f2c358b4414b84ca98 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9570.h
2e3d5c71793820d90973d547d8afdf41ff989f89 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67a.h
204feb997ba42deab327d570e5f12235d5160f00 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57a.h
03ab4e08e8685696477b62eb1a825e5198d61b8a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080.h
545dd1899c6988ffe5f50300232bd862d915cd5b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc770.h
022e8405220e482f83629dd482efee81cc49f665 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc77f.h
36b0dd6de0d0b49d435a4662c35d1f4ae5b2b1bc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9870.h
02ff42b6686954e4571b8a318575372239db623b - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1_notification.h
82c9df617999f93ebd9362851966f601b8131fdd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc570.h
eac86d7180236683b86f980f89ec7ebfe6c85791 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl957d.h
866977d299eac812b41eb702a517e27bdc56e875 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37a.h
78259dc2a70da76ef222ac2dc460fe3caa32457a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37e.h
31939808cd46382b1c63bc1e0bd4af953302773f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl977d.h
11fd2de68ab82b81211aa20c66a9a6595199f673 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9270.h
05605d914edda157385e430ccdbeb3fcd8ad3c36 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9171.h
9db39be032023bff165cd9d36bee2466617015a5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0002.h
76c430d54887ed14cace9409712259e10f042b4c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c1.h
e63ed2e1ff3fe2a5b29cfc334d3da611db2aadf6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
ea10b0d938d9314638882fdc20b9158a193f7b08 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070.h
f5760f5054538f4ecf04d94fb1582a80a930bc29 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc673.h
b1133e9abe15cf7b22c04d9627afa2027e781b81 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917c.h
9bd9f416844d798f352fcc6c8aaf2c251253c068 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90cd.h
04ab1761d913030cb7485149ecd365f2f9c0f7da - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0005_notification.h
fb5ef3d6734a2ee6baba7981cdf6419d013cee85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc671.h
ddbffcce44afa7c07924fd64a608f7f3fe608ccc - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0071.h
68c953956a63ef8f7f9bcbe71057af510f4597c1 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clb0b5sw.h
38265d86eb7c771d2d3fc5102d53e6a170a7f560 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0041.h
941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917e.h
a23967cf3b15eefe0cc37fef5d03dfc716770d85 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc372sw.h
9b2d08d7a37beea802642f807d40413c7f9a8212 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37d.h
e0c9a155f829c158c02c21b49c083168f8b00cbe - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc37dswspare.h
95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90f1.h
8b75d2586151302d181f59d314b6b3f9f80b8986 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc573.h
ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl003e.h
026f66c4cc7baad36f1af740ae885dae58498e07 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc371.h
15136a724baab270914a01a8c0e8f2c2c83675b6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00c3.h
4bbb861011139be1c76b521eaa7ae10951d5bf9a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2081.h
d1a19dee52b3318714026f4fcc748cfa4681cd25 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc370.h
158c98c8721d558ab64a025e6fdd04ce7a16ba9e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl947d.h
435a34753d445eb9711c7132d70bd26df2b8bdab - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917d.h
326dbbeb275b4fc29f6a7e2e42b32736474fec04 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9571.h
1409efc057e4f0d55602f374ec006f9db7ad3926 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0000.h
bd27ceb75c4604fef53658f16a5012d97c1534b2 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9470.h
e6818f1728a66a70080e87dac15a6f92dd875b4e - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927d.h
11b19cb8d722146044ad5a12ae96c13ed5b122b6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917b.h
1efc9d4aa038f208cd19533f6188ac3a629bf31a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917a.h
c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl907dswspare.h
4b8f95693f79a036317ab2f85e150c102ad782e9 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl84a0.h
a7c7899429766c092ee3ecf5f672b75bef55216c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9271.h
15d1f928a9b3f36065e377e29367577ae92ab065 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0080_notification.h
a26ddc6c62faac1ecd5c5f43499aab32c70f32cb - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67b.h
b29ba657f62f8d8d28a8bdd2976ef3ac8aa6075f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0073.h
c5ef1b16b2bd2e33f52b71f2b78db789ebb844f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl9770.h
ecc56a5803b85187aa95b788aedd4fa2262c1bb6 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl2080.h
dd4f75c438d19c27e52f25b36fc8ded1ce02133c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl917cswspare.h
6db83e33cb3432f34d4b55c3de222eaf793a90f0 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00b1.h
b29ea3f13f501327c060b9ddfac5834ed396414a - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl30f1.h
4d5ccf08ab73343343e0c804002a621996866161 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0092.h
593384ce8938ceeec46c782d6869eda3c7b8c274 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl900e.h
95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0020.h
c61f8348c2978eef0a07191aaf92bd73e935f7bd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc67e.h
509c56534ed6d48b06494bb22d3cf58d63254a05 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc574.h
da8d312d2fdc6012e354df4fa71ed62ae4aac369 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl927c.h
5416c871e8d50a4e76cbad446030dbedbe1644fd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl00f2.h
b7a5b31a8c3606aa98ba823e37e21520b55ba95c - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl402c.h
26c3ccc33328a66ad3bcfe999424dffda991264f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc670.h
28867d69a6ceac83da53a11a5e1ef87d9476f0be - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57d.h
053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0004.h
060722ac6a529a379375bb399785cbf2380db4fd - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc373.h
13f8e49349460ef0480b74a7043d0591cf3eb68f - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/clc57b.h
e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl90ec.h
ba76ecbebe0ed71ea861ed7016abbfc16ced2df7 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl5070_notification.h
bae36cac0a8d83003ded2305409192995d264d04 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl0001.h
ab27db8414f1400a3f4d9011e83ac49628b4fe91 - NVIDIA-kernel-module-source-TempVersion/src/common/sdk/nvidia/inc/class/cl987d.h
70b155b0da07a92ede884a9cec715f67e6b5c3e8 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_list.cpp
c70d946adb4029b3476873887488748162b88b0b - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messagecodings.cpp
ac08ccd5c2e3fadf10ae53e46e582489d1579ed0 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_edid.cpp
6fd536d1849ea4cce5d9b72d1dcbc1db9c818b4e - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_groupimpl.cpp
d63fed0074b22584686ad4d0cdaa4388b42194d6 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_watermark.cpp
a5df56b2cf8df9d4d8ab6fa2b3521649ef09384a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_wardatabase.cpp
f56f92e32710b0342805b785d34ba1a9f2a54ed3 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_guid.cpp
554e6b7dadbb68ac0f3d2e368ca3fd90832ea254 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_discovery.cpp
60994cb1131d4d37b2d3fce6cc59dfea5ebb4129 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_connectorimpl.cpp
37eabb1ab51cb38660eb24e294c63c8320750b96 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_sst_edid.cpp
a0d24a4bd71f999adbaa876168adef5a7d95f2b8 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_configcaps.cpp
fa4f4869d3d63c0180f30ae3736600a6627284c6 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_merger.cpp
d991afdb694634e9df756184b5951739fc3fd0ab - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_auxretry.cpp
1543bbaba8f3e149239cf44be3c0d080c624d5ba - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_buffer.cpp
56ee9318a7b51a04baa1d25d7d9a798c733dc1bc - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_vrr.cpp
9f31213ab8037d7bb18c96a67d2630d61546544a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_mst_edid.cpp
fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_crc.cpp
719d2ddbfb8555636496cb5dd74ee6776059db92 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_timer.cpp
f83b3c17e9f26651f12c8835a682abdd66aed3a2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_splitter.cpp
e874ffeaeb6deec57605bf91eaa2af116a9762bd - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_bitstream.cpp
c62ef84471074a9ed428b4a03e644885989b0b83 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_evoadapter.cpp
38fe8122aba8a1bc5745d81192ec7fc75934dd0d - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_deviceimpl.cpp
66e91795dc65e1bc13c545a84556d200c8eb7bd5 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messages.cpp
4803cde0fffcf89fed46d6deaeba5c96c669a908 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dp_messageheader.cpp
fe8007b3d98dad71b17595ecb67af77b198827a0 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/src/dptestutil/dp_testmessage.cpp
62d03d24af041276ba2abb96fa1634ae4f99ea8a - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connectorimpl.h
aeadcb0bc061b5db0fdf8aa67c1b5703976aa946 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_connector.h
01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_guid.h
07d22f84e6a386dad251761278a828dab64b6dd5 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_bitstream.h
11487c992494f502d1c48ff00982998504336800 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_internal.h
f6e1b0850f5ed0f23f263d4104523d9290bb8669 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_vrr.h
02b65d96a7a345eaa87042faf6dd94052235009c - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messageheader.h
e27519c72e533a69f7433638a1d292fb9df8772e - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_crc.h
543efa25367763292067245cbc39c1382c35df77 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_discovery.h
39aece5465100489867001bf57446bcfc4999c24 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_evoadapter.h
6e515f398e9ae1b603e49ec32576ccd0ce5d8828 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messagecodings.h
070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_buffer.h
36e80dd13c5adc64c3adc9a931d5ebbf922e9502 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_groupimpl.h
7974abf146f1f14cd3e3854ef63ddf52ebbeb222 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_deviceimpl.h
cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_merger.h
0f747fdf03bebdcd86dbdf16d00ee2d044bc906c - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_messages.h
325818d0a4d1b15447923e2ed92c938d293dc079 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_hostimp.h
2067e2ca3b86014c3e6dfc51d6574d87ae12d907 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timer.h
d876d77caef3541ae05f310857f3d32e642fba04 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxdefs.h
78595e6262d5ab0e6232392dc0852feaf83c7585 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxbus.h
b4d8c44957efc90ba97092987e6e43c48e85ac86 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_address.h
78c6d7d85b47636fbb21153425ef90c6d0b2d4e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_configcaps.h
3b74682e142e94b1c68bf619169f12e5805044bc - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_watermark.h
8f83883126b853c97e5859dafd98847ec54d36ac - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_splitter.h
7b7d9a137027fbbedfc041465987fa4ed4198ce4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_edid.h
cca426d571c6b01f7953180e2e550e55c629f0f4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_auxretry.h
80380945c76c58648756446435d615f74630f2da - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_timeout.h
e2075486b392d6b231f2f133922ac096ca4bc095 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_ringbuffer.h
3eea80c74a22de43b6edad21ea5873c791e093e2 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_mainlink.h
d1e8c84f279cb30978d32c784107c0247afa6e66 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkconfig.h
750ecc85242882a9e428d5a5cf1a64f418d59c5f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_object.h
379d3933c90eaf9c35a0bad2bd6af960a321465f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_wardatabase.h
e02e5621eaea52a2266a86dcd587f4714680caf4 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_linkedlist.h
5dff32bd1018e2c5c2540ea7fb571dbea596d5b1 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_regkeydatabase.h
4a098c4d09dedc33b86748d5fe9a30d097675e9f - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_list.h
5bd3706ceea585df76a75dda7f9581b91ee8f998 - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dp_tracing.h
020194b85245bad5de4dfe372a7ccb0c247d6ede - NVIDIA-kernel-module-source-TempVersion/src/common/displayport/inc/dptestutil/dp_testmessage.h
2f60ba753549b232e1b995046a356dbe0eced04a - NVIDIA-kernel-module-source-TempVersion/src/common/shared/nvstatus/nvstatus.c
ebccc5c2af2863509e957fe98b01d9a14d8b0367 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_list.h
cd902d07cc83444b150453d7baefd0e234c26ac2 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvUnixVersion.h
b85b49fc4ed38a241c79731a02b3b040a654a52a - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvctassert.h
764e5c4364922e3953b4db0411d1d3c3bdac99f4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_defs.h
8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - NVIDIA-kernel-module-source-TempVersion/src/common/inc/rmosxfac.h
f59a2759281341e56372d3cb37b16715944dd8e1 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvPNPVendorIds.h
e015e955a05908d4a2202213353eac89f1b80ff6 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvSha1.h
b58ed1b4372a5c84d5f3755b7090b196179a2729 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nv_speculation_barrier.h
b4c5d759f035b540648117b1bff6b1701476a398 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvCpuUuid.h
4282574b39d1bcaf394b63aca8769bb52462b89b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBinSegment.h
a27eb14c54c6acb647a95c264b90e25f07fc757e - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvBldVer.h
5257e84f2048b01258c78cec70987f158f6b0c44 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc.h
963aebc9ec7bcb9c445eee419f72289b21680cdd - NVIDIA-kernel-module-source-TempVersion/src/common/inc/hdmi_spec.h
62e510fa46465f69e9c55fabf1c8124bee3091c4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvHdmiFrlCommon.h
3bf0416186ee90833c727f01cc891bd568ea9d0f - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvVer.h
a346380cebac17412b4efc0aef2fad27c33b8fb5 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/nvlog_inc2.h
d2b4cc6228c4b13ef77e47bf30326826c5662ed4 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_ref.h
06aa739230c00998e039b0104e5d73da85c322fe - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/nv_arch.h
86a59440492fd6f869aef3509f0e64a492b4550d - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/dev_mmu.h
38edc89fd4148b5b013b9e07081ba1e9b34516ac - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/turing/tu102/kind_macros.h
f9311a35f375c7453d99fdde3876440b54d4cb5a - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v03_00/dev_disp.h
1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - NVIDIA-kernel-module-source-TempVersion/src/common/inc/swref/published/disp/v04_02/dev_disp.h
a26df21c3cc3eeb395428101f11da68386e0d72b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd14.h
8159b4189c577d545c1280d7d905a2dc2ba29fa7 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd.h
96b9560d322f43a980db5d6cc5072e9e81fdb9d2 - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/displayport.h
249d4f7317ce68c3ceb64e2b1ee257cc75eb002b - NVIDIA-kernel-module-source-TempVersion/src/common/inc/displayport/dpcd20.h
e1b414712accfd7c690b2fdf7139f0aaf865fc47 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/Makefile
17855f638fd09abfec7d188e49b396793a9f6106 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvkms.h
8a935bdda64e1d701279ef742b973c5dbed5727b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h
16a2e187afedf93bade7967816b0723708544e0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-workarea.h
20213d53bb52bf9f38400e35d7963d0f4db22f96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo-states.h
70d9251f331bbf28f5c5bbdf939ebad94db9362d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-softfloat.h
8a6f26ccf2e563b78f6e189c999ba470ed35271d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo.h
853d9005ec695cb5a1c7966a1f93fe0c9c8278cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hdmi.h
d4889d903bf4de06d85e55b005206ed57f28af69 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-lut.h
6b21a68e254becdd2641bc456f194f54c23abe51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-framelock.h
c1c7047929aafc849a924c7fa9f8bc206b8e7524 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/g_nvkms-evo-states.h
71e8c5d3c4dfec6f2261654c3fc91210bff78da9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-surface.h
64af1df50d2a5b827c1c829a303844de20527522 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rm.h
260b6ef87c755e55a803adad4ce49f2d57315f9a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-event.h
4f5d723c80f607a0e5f797835d561795dbe40ada - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-cursor.h
f5f3b11c78a8b0eef40c09e1751615a47f516edb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-hal.h
d3f5bc85b538a3a1d4c2389c81001be91205ec9f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset-types.h
9c90df1fa1b6dd33a7e330c47e94b5b9194ad419 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-3dvision.h
be3a1682574426c1bf75fcdf88278c18f2783c3f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dpy.h
8f1994f3f8d100ddcf8b23f5b24872bed939d885 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-vrr.h
75e8a8747795fad89b4d2b662477e5454863dcc7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip.h
d7861e2373ac04ffaf6c15caeba887f727aa41fb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-dma.h
182a47c12496b8b7da1c4fe7035d6b36d7316322 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc-types.h
248d900394aa2b58669300af4f5d26eac23edd23 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-types.h
ef78e73ec9c0b8341bd83306d1f3b2c35e20c43a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-utils.h
867e3091a945d3d43b2f28393b40edeb9d27597b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-rmapi.h
c1904d38785649d2614563d0cd7de28a15ce4486 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modeset.h
cc09ecd5ab724b244017929444309f8e77fc5a63 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-modepool.h
412d8028a548e67e9ef85cb7d3f88385e70c56f9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-console-restore.h
33dbf734c9757c2c40adb2fb185e964870217743 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-flip-workarea.h
ebafc51b2b274cd1818e471850a5efa9618eb17d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-prealloc.h
4020b2a0d4f177c143db40b33d122017416dfa2e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-evo1.h
be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-attributes.h
9dd131355ed1e25a7cee7bfef00501cf6427ae92 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/nvkms-private.h
17f6fbbd5e0a75faec21347b691f44dcb65c01aa - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector.h
4625828efd425e1b29835ab91fcc3d2d85e92389 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
a8fbb7a071c0e7b326f384fed7547e7b6ec81c3e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-timer.h
52b6c19cce320677bd3a4dfcf1698b236f29e59e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/include/dp/nvdp-device.h
a0cc9f36fdd73c99ad8f264efa58043d42353b0a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-sync.c
381fba24abae75d98b3ada184ed0cd57335819a9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/lib/nvkms-format.c
281fdc23f82d8bdb94b26d0093b444eb0c056f51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-sync.h
445a409950ab8f36cfa24d1dc73e59718d335263 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api.h
2ea1436104463c5e3d177e8574c3b4298976d37e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-ioctl.h
5c4c05e5a638888babb5a8af2f0a61c94ecd150b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-format.h
910255a4d92e002463175a28e38c3f24716fb654 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/interface/nvkms-api-types.h
e48c2ec8145a6f2099dddb24d2900e3ae94ec02e - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h
727bd77cfbc9ac4989c2ab7eec171ceb516510aa - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h
009cd8e2b7ee8c0aeb05dac44cc84fc8f6f37c06 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi.h
fb242aa7a53983118ee019415076033e596374af - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h
f6875ef0da055900ef6ef1da5dc94cba2837e4d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c
01d943d6edb0c647c2b8dbc44460948665b03e7a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c
394ea31caa5957cfb2c8bb8c3cc0e4703213fe7f - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/kapi/src/nvkms-kapi.c
3f978853dfa0435b746ff8c954b8e5e5f0451b43 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modepool.c
85ddb19f89833ca57fd2deff2e2b4566e162a56c - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hal.c
8415bcd6ab34e356374659e965790a0715ed7971 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-prealloc.c
c98f76bcfc7c654a619762ebc3a2599f9aa89f8d - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-3dvision.c
5fb73f35841c41e7376531732cb12303224e61ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-lut.c
e9626eee225e58ec2d5be756c5015775ca5e54b9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-vrr.c
86da3c7c09354d2c49d95562aba15cbedb543d9b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo1.c
89baced4cf1a96b7693c9e2f85b01093bbba73f7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rm.c
7ef594aea1e80408148c3661477a4edc6e8d8d50 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-flip.c
07c2f10473e2fbe921b2781cc107b5e56e6373e3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-attributes.c
d28cd72c8dca4cb54a15630b80026eca57a9ed80 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo.c
da726d20eea99a96af4c10aace88f419e8ee2a34 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-event.c
5c79c271609ebcc739f8d73d7d47f0b376298438 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
b55665d7bceaad04bbf29a68f44536518302c3d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo2.c
6b79c2ce1658722fa6b3a70fb5e36f37c40d8f96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-modeset.c
1918ca3aa611cd9dfc79d46d038ab22706f0b1ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor3.c
add6682206360cb899ae13bae6dc2c19d830d7b7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dpy.c
c2870190ca4c4d5b3a439386583d0a7c193d6263 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hw-states.c
f27f52dc428a6adeb936c8cf99e1fc2d8b0ad667 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-dma.c
5acf19920d56793d96c80e8461b0d0213c871b34 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-surface.c
c2d0e6bef0c4929a3ca4adfd74bd6168fa4aa000 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-framelock.c
673ad86616f9863766bfec0e118c918297d32010 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/g_nvkms-evo-states.c
c799d52bdc792efc377fb5cd307b0eb445c44d6a - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor2.c
0d39e349fdf33d550497527fc8d43f14e752df6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-hdmi.c
8f22c278a5839d36f74f85469b2d927d9265cb80 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-utils.c
eb09642e8b5d9333699f817caaf20483c840b376 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms.c
ab17e5b4cafa92aa03691a0c187ef8c9ae53fa59 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-cursor.c
574b1268ff83e4e5ed4da15609247a5c0ec8f51b - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-console-restore.c
b44193cbf1371ca1abfda36e705edbad1d473e88 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/nvkms-evo3.c
8af6062034d464f778969e26d3bf5a9b4cdaccf0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector.cpp
69fed95ab3954dd5cb26590d02cd8ba09cdff1ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp
6b985fc50b5040ce1a81418bed73a60edb5d3289 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.hpp
f2a05c29383bfc8631ad31909f31a8351501eb27 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-device.cpp
31767fd551f3c89e5b00f54147b6a8e8fa3320e3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp
110ac212ee8832c3fa3c4f45d6d33eed0301e992 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-host.cpp
51af3c1ee6b74ee0c9add3fb7d50cbc502980789 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp
f96cd982b4c05351faa31d04ac30d6fa7c866bcb - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-timer.cpp
f6c3e8bd4ee13970737e96f9d9a3e4d8afdf9695 - NVIDIA-kernel-module-source-TempVersion/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp
9767fbc3273e17e7b2e68374bfab0824bea34add - NVIDIA-kernel-module-source-TempVersion/src/nvidia/Makefile
c5f16fdf43ca3d2845d120c219d1da11257072b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/nv-kernel.ld
d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h
aba0bd796d932fa19e8fad55ed683ae57d68bffb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-priv.h
1d8b347e4b92c340a0e9eac77e0f63b9fb4ae977 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h
499e72dad20bcc283ee307471f8539b315211da4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h
40cb3c112bbcb6ae83a9186d0c9fa1857cf6a126 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os-interface.h
1b53bbf5f8452b8057ff2dd7828947a047db38d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv_escape.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h
e3679844971ecc4447259fb1bdf4fafbbdff2395 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osapi.h
4750735d6f3b334499c81d499a06a654a052713d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-caps.h
1e89b4a52a5cdc6cac511ff148c7448d53cf5d5c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/os_custom.h
d576ede913ef8cf4347ef0e8dbfe9c2d992b7965 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv.h
ddfedb3b81feb09ea9daadf1a7f63f6309ee6e3b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h
9c7b09c55aabbd670c860bdaf8ec9e8ff254b5e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h
cc3b2163238b2a8acb7e3ca213fb1ae6c5f0a409 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/osfuncs.h
2f5fec803685c61c13f7955baaed056b5524652c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h
e08f597ce97fb1691bcea37b4d017831a457d027 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/include/nv-reg.h
6ebda7ea5b17b7b9bfa9387fc838db9f0c3405a5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osinit.c
b5b409625fde1b640e4e93276e35248f0fccfa4c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c
9d9035afd7af31f30cdbf2d4c75e5e09180f0981 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osunix.c
21ac9d6932199ce0755dbead297eb03c9900f8c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
49dc935d4475b572478c63324f0832c972a4277d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os.c
532366fd9a288a812eca78b92b304ba3625f8c0a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c
006e77a594ae98067059ad3d7e93821316859063 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
f134270af5ecd7c5ba91bf5228fe3166b101dd6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/escape.c
690927567b5344c8030e2c52d91f824bb94e956c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/registry.c
53cd45a8121f8acb72be746e389246e1424176f7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osapi.c
05b5aa5ad6a7df974f05608622ae260d70a550db - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c
fb5272f3d0e465aedbc99ddcabb1c6c428837a6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c
0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/tmr.h
7df66a87c9498ae73c986e60fcb9cb1cbcd19e19 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objrpc.h
1feab39692ea8796ac7675f4780dfd51e6e16326 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/objtmr.h
28d6a6ae495d9bc032c084980ebf5d94448bcf29 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_headers.h
31deee778df2651d3d21b4d9c8ab180b8dc1ff14 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h
961ed81de50e67eadf163a3a8008ce1fde1d880c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_hal_stubs.h
4db7387cc1ce08ccc62404b80b19c7f1b685e746 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc.h
e4d88af4eb51d32288f913d90e490e329884970b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h
35da37c070544f565d0f1de82abc7569b5df06af - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nv_firmware_types.h
df4d313c66e75fa9f4a1ff8ea2c389a6ecd6eb3d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/acpigenfuncs.h
bff92c9767308a13df1d0858d5f9c82af155679a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvacpitypes.h
db0dc6915302888de06e3aa094d961cfe25e0059 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/nvrm_registry.h
059c1ab76a5f097593f0f8a79203e14a9cec6287 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
d50ff73efaf5bc7e9cb3f67ed07ede01e8fad6f6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/interface/deprecated/rmapi_deprecated.h
671286de97aa63201a363fd7a22c92ee8afe4c7c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/eng_state.c
6fa4ba2da905692cd39ec09054f2bd6621aa2a7a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource_desc.c
5a97d4f8ce101908f1a67ffe9cc8ed00b6bf43b2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_resource.c
1653c7b99cfc86db6692d9d8d6de19f1b24b9071 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_uuid.c
caf2b80fa0f01b9a3efcd8326bf6375455f2e1b9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_access.c
4e1be780ac696a61f056933e5550040a2d42c6bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_device_mapping.c
0824d200569def5bf480f2a5127911ed0ea881e6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_share.c
f6b4e40b638faf9770b632b404170e1ceb949be5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_gspclient.c
db44a803d81d42bfaf84f7ea1e09dc53c662acef - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_timeout.c
9515ea68cdac85989e4d53d4c1251115291708dd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu.c
08be13ced6566aced2f3446bb657dae8efb41fbe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_rmapi.c
77573c8518ac7622211c4bdd16524d369cc14b96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device_ctrl.c
fa854efc5cdf4d167dee13302ee8377191624d95 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/device.c
89543f7085fbc2ca01b5a8baae33b5de921c79e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c
0e4c2d88b61a0cf63045fe70e5ba2c81c44e37af - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
acb2a62fb60e08eb6d16518c43c974783139813b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer.c
834efbfff64c0d01272e49a08bd6196e341985a8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c
dd0bd914c6c7bfeabdd9fe87fb984702e0765624 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_objs.c
19447ad30b3fc2ee308bcc45e3409bafa5defe0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
3abbef0a6fc95d6f7c7c5a16cbbbb51aaa457cc0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c
0918cada217ca1883527fe805fc30babf7b8038d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_channel.c
e1a6dfb38025abeb5adfda929f61eb6ee44b5c84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
ed25b1e99b860468bbf22c10177e0ba99c73894f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
8cd12c2da71acede5046c772f14aff7cbd88af12 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/kern_disp.c
01e8b56f7677f5cb7f950d9aa9bd37d04153085b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c
629566bf98be863b12e6dc6aab53d8f5ea13988c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
b41502d73d7781496845377cebd0d445b8ca9dc6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
8a418dce9fbeb99d5d6e175ed8c88811866f3450 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c
e26ade846573c08f7494f17a233b8a9e14685329 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
d6e1bd038fa0eff5d3684a5a2c766fdac77f1198 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
d4a07d1c6beb7ddb229ed6e5374343b6ce916d84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
bc2b57acc8fa8644615168e3ddbaf7ac161a7a04 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
2bb921b462c4b50d1f42b39b4728374c7433c8cb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
086e9a51757c3989dfe0bf89ca6c0b9c7734104a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c
56be7a21457145c3c6b2df7beb4c828b7bd1a3b4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice.c
5be208cc0e1eae1f85f00bb0b502fdba74d6656c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c
a64c51c515eb76208a822f1f623d11e2edd8d7ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c
a54628e9d2733c6d0470e1e73bca1573e6486ab3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c
1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c
c3b93cf7e3c97beb1072135a58d211f67722ad10 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c
7db9691e2078d4b093f2e09c8ba0e6245e505ef1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu/dce_client/dce_client.c
f89e982b0e31a1898e1e4749c9a8ae9f0bb59a0c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.c
d92267a3394ded5d7d218530fd16ce00a920b1d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/alloc_free.c
2279fd14aab9b5f20b8fc21f04dd0fca41e418c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_notification.c
11a547cbfdbce000a6e5edf48492f5b930ddbdca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rs_utils.c
81f66675295315cfc52be225c2e9ee912b56fbac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/sharing.c
569f56831cde7bdc528ac2e543eea485025ec6f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client.c
05669e008dfd89e5c81381e6c60230c1fe17a876 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.c
820b6e63c2b11b0764305c483142f626b6f72038 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rpc_common.c
bc83726df04c30d02a1852a10a22c77fdb3ef7a7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_desc.h
5f194ba056b018a8194c16b0bbb6e49c1b80a996 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/param_copy.c
e40f6742084cd04252f3ec8b8499a26547b478bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping.c
ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/deprecated_context.h
3a0f999e390d93b0db8272f55fbec56f6b055fe4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_utils.c
78f1e379c3d1df9e34baba77f78f48b8585bdc74 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event_buffer.c
8e40d2f35828468f34cf6863f9bf99c20dbfc827 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_cache.c
b441ee824e9c15c82956254704949317024ceb41 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.h
277441b3da96fc01199f1d2f5102490e2e6cd830 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/control.c
38d0205b68ea2c82709b42eb7e8b9cf92cec8828 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi_stubs.c
2f89b9059467e7f67a6a52c46aecae5cb0364ab6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/binary_api.c
46aa43b18480d2eb7519b2dcd0fe6a68c79b8881 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource.c
f2c7d77e4183994d7ee414e2a87745fcd23d995e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/mapping_cpu.c
6f46dd43e4b3f2ad803a4c9492cb927aebffc1f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/client_resource.c
59d42b6a123b062237b3b6ca382211e35057ef1e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/resource_list.h
ddaf2b8e424df9147a4e2fecf3942b64b1d2b001 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/entry_points.c
68cc7b258f934097e9dc31a38e7e3bf2ce2fe5d1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/event.c
c3820fa4bb1192a9317ca834aeee3434c7eb8059 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/rmapi/rmapi.c
ea7be8a55a3310aa1c3926ed69c86a6491925e08 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog.c
70507a8d43797eb3cdc13408ae8635f4a2eebce0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/nvlog_printf.c
b3a29311cc22e2dae686f8ed2df6bc828aa826cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/diagnostics/profiler.c
af4ffa4b423e07cf40eb863c11dbf515c7104874 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_timer.c
1793e056a0afcc5e1f5bb58b207b49c5f1556eca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_stubs.c
63e5e17280d865ace8cdd8eb8a2598d3d7830ad7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_sanity.c
8e5af753de1725dd919185c29d03ccb0934fab6e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/os/os_init.c
8d96c1b4c00f3a029ba8c27dd2e8e88405c3a1b6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_minimal.c
c0822891f614e6ec847acb971e68aad8847e0cd7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/locks_common.c
c68f2c96bfc6fce483a332a5824656d72986a145 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/system.c
37000b419d23a8b052fc1218f09815fafb1d89c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal_mgr.c
7b9c95f912b203c68b6ba1f62470dffee4b4efe3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/thread_state.c
677c655b0b8e86bdab13cdd4044de38647b00eec - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hal.c
8eac3ea49f9a53063f7106211e5236372d87bdaf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/info_block.c
b9eabee9140c62385d070628948af0dcda3b0b1a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/core/hal/hals_all.c
003e3012e87b8f8f655749db88141d74660e8d8e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c
a5a31b9b62e6d19b934411995c315d4fdac71ca0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_db.c
37d1e3dd86e6409b8e461f90386e013194c9e4d1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
ed24c0406c85dc27f0fca1bac8b0dcb7a60dca2d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/gpu_mgr/gpu_group.c
6aa752ae480e883d077de842f02444151947f82f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c
956b7871a267b7d381d1cd7d4689ef1aec1da415 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem.c
9d9fcd87d784a758659b6cc8a522eaf9beac4b6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/standard_mem.c
15f3290908931a9e4d74b0c0ec9e460956e39089 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/system_mem.c
623dad3ec0172ed7b3818caece0db5687d587ff3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c
64bd2007101cbf718beb707898e85f40071ae405 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c
94acdcebee0cdcbf359b15803ec841e5284e1ff2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/vaspace.c
079893039c2802e1b0e6fcab5d0ee0e4dc608c84 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/io_vaspace.c
5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h
78cbb6428372c25eba0ccf8c08e7d36d18e4bae8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/base_utils.c
6d5915924b4e26a5e7592427e34b77596162d0fe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/lib/zlib/inflate.c
cade0f7049cdb2ab423a073887ed20ba1abdb17e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/utils/nvassert.c
8a4e2aec6fc01ce1133cfc7ef80b6363c5394208 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvoc/src/runtime.c
8ed5171254e51e59fc5586e729793831165b8c0c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/tls/tls.c
206dda159ecbc0340ac9329250302c76a504e5a8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c
d48d51a880fced52ad6e323d984e872ccf9ef3bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_client.c
d0ae6d7a363db3fdf54ae1a760630b52a2019637 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_resource.c
883ad1cf4ed1714eb74d44d3b9a41d6a4723b650 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_server.c
0c9581aa68a77cb9977a7fbcfd2077ccb618206e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_rights.c
dac54d97b38ad722198ec918668f175dc5122e4e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_access_map.c
1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/resserv/src/rs_domain.c
d3e5f13be70c8e458401ec9bdad007dfadedcc11 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvbitvector/nvbitvector.c
836ba8b401fb6b6fcf4ccde1b644ebaefc3d8ee1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/ioaccess/ioaccess.c
9c40bfebe2c57b972683e45dc15f358aaa2280f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c
8f41e7127a65102f0035c03536c701b7ecdaa909 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/string/string_generic.c
b528ef8e238dd2c22c6549057b54fe33039c6473 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_tracking.c
b6d6074ca77856fc5fe4ff1534c08c023ee592a4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c
caff00b37e7f58fde886abcc2737c08526fa089e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/memory/memory_generic.h
66e79047600e0a40c50e709c6c82402d9b205ad0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
da86b765702196eb0011ac9d14873fbc1589d48b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c
7cdc50ee31b9cde14c0ce6fcd390c5d4564e433d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.c
a305654bafc883ad28a134a04e83bbd409e0fc06 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/cpu/cpu_common.h
2fa76d2d5ba7212f826b656aa683223a470e484c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/core/core.c
6f6c83e9ee6d91fc8700e5015440f2bc72e6600b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c
9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_common.h
b55b7b59f35d848d5a3b43d63da4d2f7b0af5d3e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c
7416712aa964befcf8fede86e5a604871a2d00b8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h
6dd0c5f2384610ea075642d8e403ddd8c8db371a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
87ac95cf569bb550adb3577c6a6658d094c59999 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
a045a19d750d48387640ab659bb30f724c34b8c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c
f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
595a6238b9f04887dd418be43ff31f3e7ca6b121 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/map.c
4418c0344b64740050ff8ef6ee085f0687a323d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/list.c
057ad074f6252f7809a88f918986d7d5aacff568 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/queue.c
2389c9dd3b13fd2ff26d2d1342c515579079bc71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/multimap.c
2975e5cecee2c1fd5f69a8ffc20a49016e83025c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/btree/btree.c
f0ce913eb568f85e6e1c1b8965f2cd2b98e81928 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/src/libraries/containers/eheap/eheap_old.c
cba2c17804f6f2062dc5d75583e4a03e03016d1d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.h
133e94f73c781709f407b03d8cdfdd8865c39b4b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.c
801eb295d07258ad70b99cb0fe85f3421690e0c4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_private.h
46c1a2066ead316ea69c60dc323bdb649bc11c0f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.c
f9bdef39159a8475626a0edcbc3a53505a0ff80a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_hal.h
958d9a2cddc91edfafb5c2f3d9622443ac49a6ef - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.c
d405e01478d26ea99cc0012fa2d6e0021bbe6213 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.c
182602832a033b3e2d5f88d4ba8febe63eeb2f9e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.c
376572489e0d4211663da22d5b0de7c7e740fb29 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.h
e3c4822ac998ab5c7946919c85011f6172dc35ee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.c
fa5e1c6001e60f77415d0a8f87c8b548b12e1217 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.c
ddc0ac4e1d8b8aef15e147f1f85f8df37c196763 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_register.h
4fe5357eabd0c5e351fb965ceead308240f68eb1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_objtmr_nvoc.h
4f4acfdefc7b9a0cdfe2d5840cc18c9c33366053 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.h
1d66bab50a7d39faa2b0fec469a4512d2c7610d5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.c
fbcbeb92e46ba11ac26c04c9688b3ffcf10f5c53 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.h
e449382e19e4dcfcf0aec0babe5a1c8ce2f4249b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.c
87a5ae8e07103074020ba052ca45ab39e918d3bd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.c
47b7744ddd01b821bf2fd25fdb25c8d6d55ee01d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_prereq_tracker_nvoc.c
c46cae4a17181c48bafc01237b83537df61c41ae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.c
f42bfa3b5a801358d30f852625d8456290550f46 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.h
59a87763c6abdc54828f2785a7d90e43e607bc87 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_inst_mem_nvoc.c
da3cc08f12ccee23bcb1c0d0c757b8bbcb81e4fd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.h
6fd6953e4ae0af707376a40ea0e4f3e70872be7b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.h
162777624d03af2f17dfdc28bc35143e2ec6cdee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.c
b82e5db65ad41764f456d6f924c89d76c165e48d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.h
63e9d0416d5ca1fdf547b5fba9ec76e54690c9dc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_ref_count_nvoc.h
26b240cb74736e7ed85cb0775e4ddda45b3a804e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.c
499a3d9c61a86b667cc77cf8653a71f7fe85078a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_name_released.h
ac842d9de5eae74ef02b0a75259fb016b80c6eac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.c
88d336f88c9b72ec2c1352d4ebe00c0831eafbca - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_sdk-structures.h
fb78615cde6323784f51d33f2acd61fd4030fee0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.c
213ebb4fdfa3c2f64b5f998e2ad990e448d4a104 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nv_debug_dump_nvoc.h
a6174ad345cfdf926cbb4c86c7e8eeadfccb0ddf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_class_list.c
fa785f8138598af783aefecf10b141d524e6bb42 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.c
de97c5afdc34cb9aff23c3ba166e21f660cf1f47 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal.h
f9bdef39159a8475626a0edcbc3a53505a0ff80a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_private.h
53b2c39666e1da206d44d69d54009f20440503bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.h
93f9738c0e8aa715592306ddf023adf6b548dcc4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_nvh_state.h
2b49950ba8f540ed4231c3334810edbb212bb859 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_resource_nvoc.h
d614f90730e2ee78bc3aae47b4e7976500e166e7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.h
4302502637f5c4146cb963801258444f2d8173e1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_allclasses.h
7bb406aa863430507bdf07b5f3e519c0d756220a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.c
6f3fc9676df77fa24c49140331b87ed5988ed57c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/rmconfig.h
cb02e66e5fc06aa340ab460c977961701e9ba295 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.c
079ac6d2a90bd2fc9413e092a729202dbc5f724a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.h
65d1ace1e68c9b39cce6db61aa8b86ee47a0ae4b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.c
e0988b45cf712f1a7662b6f822eaed3ffd9938f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
40c937ca657bda9c0b67bd24c5047d39e596c16c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.c
f8e842add67dc070cc011ea103fc56cfd81c8b9a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.c
3a5457a216d197af8f120c660690a55ee44bdd8e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.c
21e3cf689d84b1a28e11f66cc68a0bc6713108b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_server_nvoc.c
edead99d125425ddf8f2fa4e4261b8cc3bf566fc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.c
b07c2c5e8df4de2bb9d242fd1606f1a57b8a742d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_io_vaspace_nvoc.c
bfabd5155af3172e1c0a5a0b66721ff830c7b68f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hypervisor_nvoc.h
cc635daf3d7a9a176580951841b82e9eb0d6f5ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.c
757b3ecf94d0c8914a32c4bd302f8ccfa4027856 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.c
6263c1ceca0797d34a102f9846acd1fdef06fb60 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resserv_nvoc.h
3b0e038829647cfe0d8807579db33416a420d1d2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec.h
abda8536d885be1422810c184b936bbc880972eb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_desc_mem_nvoc.c
f6f40d568bcf2ae89547ad054f9b5357bac366ab - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.h
ceb4dd72148dfe4a0581631147e8d7636abfd61f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_chips2halspec_nvoc.h
41784541b2e9ee778b52e686288fe492c0276fec - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_mgr_nvoc.c
d32d0b65f5f76cb56ca7cd83c0adfe5cb5330924 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_nvoc.h
d04adc777f547ae6d1369cf4c94963e5abf90b86 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.c
ac3965eea078f1998c3a3041f14212578682e599 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.c
0dae533422e24d91a29c82d7be619160bbb6f6be - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_context_dma_nvoc.h
3f5a391895fc900396bae68761fe9b4dcb382ec0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.h
9eb042cd3feb89e0964e3f4b948ee690f02bf604 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.h
285af0d0517cb191387a05ad596f74291ec81737 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_desc_nvoc.h
9646d1c4d472ad800c7c93eec15cc03dd9201073 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_objs_nvoc.h
c370a103a4c1c9cf2df3763988e77ef8f7bc6afb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_db_nvoc.h
2239839c8a780a87e786439a49ab63e25d25001a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rmconfig_util.h
09597f23d6a5440258656be81e7e6709390128f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_private.h
8e0e60f6d30bbed679c43b4997875989314ee88c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_dce_client_nvoc.c
dec0f585ca46dc8e1aae49c8ea58db5a415de65c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-message-header.h
871fd0260ab9c164b8f6a7d1aba4563af622f1ac - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_channel_nvoc.h
205490d6651110f28009e752fa286f818bed22fb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_syncpoint_mem_nvoc.h
07a37ff685e68a703455e0ed7db7940697487ed2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_nvoc.c
cc71518b4151dc2ee0592bbd2866d437043d0e1a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kernel_head_nvoc.h
2c28d729456749f16ae03fb48b1e416706762805 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_resource_fwd_decls_nvoc.h
59c3612a596ad6b996c9d1506f9893bd1b5effee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.c
81a6a28692f50efeebecad125de0585dd711ff36 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_device_nvoc.h
3f581df19314b273244c4c42ea915ec8ef0d8ce2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.h
e839f8a5ebef5f28818bb5824bd7c52320db9a74 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_sf_user_nvoc.h
e0b8f64c042dcbb6340552cb3517dabdeb490f1b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_nvoc.h
7523c2ee9228ad0e2fb3566b23b9720d7896afae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_state_nvoc.c
ad50b3dbe1685eefe51c4fc296f3eade70789dfb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.h
ca042cfcdfe8cc8a141f8bb5c9e6c05d8a71b707 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.h
2ab6933e07a84c64dfcbeef3b3f4e3f14249d8c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.h
ffd4f01212709e321d4097e424fe5d32038f5d8b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c
12776c69191b583ffcf0914697cf41802f52ef01 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hal_archimpl.h
05cb2fed8648f07b54dc2e8bacbafb323ea8262e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_standard_mem_nvoc.h
0b15dd4515c5e436a659883a48e62bf3c68bf439 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_nvoc.h
0269da77a8db8efde1debc8236f2b3de2cd2597e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_eng_desc_nvoc.h
1bdccdbabf5ae52fd65b829c35079bb7a8734939 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_tmr_nvoc.c
410a759c949904b7ae1eecafb31143fad579c0a1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_client_nvoc.c
73c598515eb7985c8f4cace0946ec9613960be6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.c
73a37ad59b9b13b61eb944748b6c2ba3cad7b630 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.h
8915f69e67e1f3a809a5479e36280df06ce8dd90 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_system_mem_nvoc.c
d792fbb20b6ca5f2d62addf6a94b0c5027ae15fe - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_subdevice_nvoc.h
6124890a54e529dff8b9d6ecf8f4bebe1e10a8a2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_os_nvoc.h
cb03502bf603c88b709ec803b60efd1d6f8e5ee1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rpc-structures.h
b378d336af4d5cb4b1fb13b85042fad1fe02f4cc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_journal_nvoc.h
7c1b36cca9e8bf1fe18284685a6a80620df348cb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.h
cd833a822c1ce96c79135ba7221d24f347ceadb1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_mgr_nvoc.h
a016a7d8e07389736c388cb973f3b2a177ea917d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_disp_capabilities_nvoc.c
42d784e8b478bbf48293a805aa227f0abdf1923b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_nvoc.c
b29061454e7d8daa0cef0787f12726d105faf5c4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_resource_nvoc.c
4b9f2ee66b59181f226e1af5087db6ea80f1ee27 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_virt_mem_mgr_nvoc.h
23d16b4534103f24fac5bb86eb8bab40e5bcba57 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_hda_codec_api_nvoc.c
e48b8b6ba9da5630a7ade526acbb94e50d9b636d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_vaspace_nvoc.h
b86536778197748c707c3e9e4c73c5fbcb037e32 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_generic_engine_nvoc.h
07fd5f5534a6d751107f582ba187c7a53a139954 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_rs_resource_nvoc.h
f4a5684d5a877b90c7ae7b66436117c6feb65f91 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_mgr_nvoc.h
ab79a1418b65b9d65081456583169f516dd510c9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_event_buffer_nvoc.c
bd048add5f0781d90b55a5293881a2f59ace3070 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_binary_api_nvoc.h
e50c91a674508b23b072e0dd2edbf743f24b333d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_object_nvoc.c
df070e15630a11b2f4b64d52228fa5a6e7ab2aa9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_halspec_nvoc.h
0f3140b5eae77a6055f32a91cb13b026bbb23905 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_kern_disp_nvoc.h
76b1f545e3712a2f8e7c31b101acd9dd682c52f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_traceable_nvoc.c
14450b18d002d4e1786d4630ef4f1994c07ef188 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_odb.h
7b0201852361118f277ee7cc6dd16212c0192f71 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_gpu_group_nvoc.h
3d3385445934719abda1fefd4eb0762937be0e61 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_client_nvoc.c
c4fde03d5939b0eef108fde9c2f10661568f22a9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/generated/g_mem_nvoc.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/os/dce_rm_client_ipc.h
76b24227c65570898c19e16bf35b2cad143f3d05 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu.h
61c7d3ac2dc61ee81abd743a6536a439592ee162 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_child_list.h
bf894a769c46d5d173e3875cd9667bb3fe82feb9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_timeout.h
f17b704f2489ffedcc057d4a6da77c42ece42923 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource.h
0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_state.h
426c6ab6cecc3b1ba540b01309d1603301a86db1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/eng_desc.h
c33ab6494c9423c327707fce2bcb771328984a3c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_halspec.h
6b27c9edf93f29a31787d9acaaefb2cefc31e7d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
1938fd2511213c8003864d879cf1c41ae1169a5f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_uuid.h
cf3d1427394c425c543e253adf443192ca613762 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_access.h
ce3302c1890e2f7990434f7335cb619b12dee854 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h
97d0a067e89251672f191788abe81cf26dcb335f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/device/device.h
61711ed293ee6974a6ed9a8a3732ae5fedcdc666 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h
b39826404d84e0850aa3385691d8dde6e30d70d4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
51a209575d3e3fe8feb7269ece7df0846e18ca2a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h
277a2719f8c063037c6a9ed55ade2b1cb17f48ae - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_channel.h
be7da8d1106ee14ff808d86abffb86794299b2df - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/disp_objs.h
576216219d27aa887beeccefc22bcead4d1234d7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/kern_disp.h
5179f01acf7e9e251552dc17c0dcd84f7d341d82 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h
9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
f758ea5f9cbd23a678290ef0b8d98d470e3499e0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h
6756126ddd616d6393037bebf371fceacaf3a9f1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
20416f7239833dcaa743bbf988702610e9251289 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h
a29f55d5fbc90dade83df3ef3263018633675284 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
82abc2458910250c1a912e023f37e87c1c9bbb9e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
889ba18a43cc2b5c5e970a90ddcb770ce873b785 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h
b52e6a0499640e651aa4200b2c8a1653df04a420 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
24d01769b39a6dd62574a95fad64443b05872151 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h
efc50bb2ff6ccf1b7715fd413ca680034920758e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h
ccca322d29ae171ee81c95d58e31f1c109429ae7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/gsp/message_queue.h
1e3bebe46b7f2f542eedace554a4156b3afb51f1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
5f60ac544252b894ac7ecc0c6dc4446e6275eae5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi.h
2baec15f4c68a9c59dd107a0db288e39914e6737 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client.h
a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
61e3704cd51161c9804cb168d5ce4553b7311973 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/resource.h
99a27d87c7f1487f8df5781d284c2e9a83525892 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/binary_api.h
497492340cea19a93b62da69ca2000b811c8f5d6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event_buffer.h
f3028fbcafe73212a94d295951122b532ff5445b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rs_utils.h
b4bae9ea958b4d014908459e08c93319784c47dd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/event.h
ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/exports.h
2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/alloc_size.h
c9cb08c7c73c0bdd75a320640d16bf4b4defe873 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/mapping_list.h
f19dad1746e639d866c700c2f871fcc0144f2e5e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/control.h
f1713ecc0b3e58e46c346409dbf4630aa6f7f3ed - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/param_copy.h
255c28b9bd27098382bace05af3ad7f195d12895 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/rmapi_utils.h
4453fe6463e3155063f2bdbf36f44697606a80a5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/rmapi/client_resource.h
7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal.h
b259f23312abe56d34a8f0da36ef549ef60ba5b0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h
c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/traceable.h
7e75b5d99376fba058b31996d49449f8fe62d3f0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/profiler.h
fd780f85cb1cd0fd3914fa31d1bd4933437b791d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/tracer.h
3a28bf1692efb34d2161907c3781401951cc2d4f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/diagnostics/journal_structs.h
c8496199cd808ed4c79d8e149961e721ad96714e - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/capability.h
e5b881419bc00d925eba9f8493f6b36cf3ce7ca7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_stub.h
408c0340350b813c3cba17fd36171075e156df72 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/nv_memory_type.h
af25180a08db4d5d20afd09f948b15d8c4d2d738 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/os/os_fixed_mode_timings_props.h
457c02092adfc1587d6e3cd866e28c567acbc43a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/info_block.h
bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/core.h
cbfff1f06eecc99fb5a1c82d43397043058f02fc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/printf.h
f929d43974893cd155ab2f5f77606f0040fe3e39 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/locks.h
b5859c7862fb3eeb266f7213845885789801194a - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/system.h
37f267155ddfc3db38f110dbb0397f0463d055ff - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/strict.h
bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal.h
ed496ab6e8b64d3398f929146e908c5a453a03d9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/prelude.h
b319914c97f9978488e8fb049d39c72ed64fd4d2 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/thread_state.h
b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/core/hal_mgr.h
8ef620afdf720259cead00d20fae73d31e59c2f7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h
2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h
e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h
ea32018e3464bb1ac792e39227badf482fa2dc67 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h
5b151d0d97b83c9fb76b76c476947f9e15e774ad - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h
0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h
4c386104eaead66c66df11258c3f1182b46e96ee - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h
a5f49a031db4171228a27482d091283e84632ace - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/system_mem.h
d15991bc770c5ab41fe746995294c5213efa056b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h
5ae08b2077506cbc41e40e1b3672e615ce9d910f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/vaspace.h
02d6a37ef1bb057604cb98a905fa02429f200c96 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/mem.h
1a08e83fd6f0a072d6887c60c529e29211bcd007 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h
2d4afabd63699feec3aea5e89601db009fc51a08 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/mem_mgr/standard_mem.h
5e9928552086947b10092792db4a8c4c57a84adf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/acpi_common.h
2f05394872ffa95d700b7822489fa59f74ad5819 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/kernel/platform/sli/sli.h
fff3ebc8527b34f8c463daad4d20ee5e33321344 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/ref_count.h
04dba2b7a6a360f3e855a7d6a7484ddcdfb90c19 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/base_utils.h
f8d9eb5f6a6883de962b63b4b7de35c01b20182f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/protobuf/prb.h
601edb7333b87349d791d430f1cac84fb6fbb919 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/lib/zlib/inflate.h
9255fff39d7422ca4a56ba5ab60866779201d3e8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/poolalloc.h
8dd7f2d9956278ed036bbc288bff4dde86a9b509 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/eventbufferproducer.h
e53d5fc9b66dbec4c947224050866cec30b2f537 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvrange.h
398e4cd63852a18da6e42b920eacd927a2c38bc0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nv_enum.h
ba3c81e9eae32eefbf81818b48fdf6ccd7e73163 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvmacro.h
18321894aa7631b491ea39edc2d45d1028cdc9c6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvprintf.h
167f49cccc912430bb6b3cb77395f665a32cc8be - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvbitvector.h
1ed5d8ae82f37112b163187fa48d2720957e6bdf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/utils/nvassert.h
62a18f19f79512ebccdf286068e0b557c7926e13 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/runtime.h
00433b51c4d6254fd4dfc3dcd9b4ad59e485e7c0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/object.h
1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/utility.h
5cadc87ba685991c7d4c6d453dcc9a2cca4398bf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/prelude.h
664ff0e10e893923b70425fa49c9c48ed0735573 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvoc/rtti.h
bdb558ee8f782e6be06fc262820f6bd9ce75bd51 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/tls/tls.h
56b8bae7756ed36d0831f76f95033f74eaab01db - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
7239704e6fe88b9d75984fb5e9f4b5706502d7f3 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog_printf.h
e08146f5de1596f5337c49cfbe180e30e880dedb - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/nvlog.h
d2c035e67e295b8f33f0fc52d9c30e43c5d7c2ba - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
cd033fe116a41285a979e629a2ee7b11ec99369f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_rights.h
2dec1c73507f66736674d203cc4a00813ccb11bc - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_domain.h
a0d3d164eb92280353cdc4458d2561aae8a68c1d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_server.h
89ece4711626bf1e4197c69bd5754e2798214d76 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/resserv.h
bacdb2c1a1dbf182a0a3be15efa0a5f83365118f - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_resource.h
df174d6b4f718ef699ca6f38c16aaeffa111ad3c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_access_map.h
841ddca998b570feb1d59b50d644c8f2b59ae8e9 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/resserv/rs_client.h
b795f5cb77ecd2cc407102900b63977cfb34bbfd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/ioaccess/ioaccess.h
3dcee4e110f4c571e7f49fae2f2d0630d008a906 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/nvport.h
46345715dde843be2890b33f191b2f3b69385e0d - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/memory.h
a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/util.h
b93c2532babf176f7b91735682e7d7cdc41f96f8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/debug.h
147d47ef4bd860394d1d8ae82c68d97887e2898b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/core.h
6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/thread.h
3e656d5ed1f5df898ec444921ce77a40ead66b28 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/atomic.h
3ac7ddf3d402f3fd20cffe9d4e93f457de319605 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/sync.h
2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/safe.h
22420ad669a9809602f111385b7840556e58ecff - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/cpu.h
6ad1beaa2783a57330240d47b373930cd36ca5d0 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/crypto.h
2805fad632acad045044e0b8417de88032177300 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/string.h
23afbd04f4e4b3301edcfdec003c8e936d898e38 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h
eedda5c4b0611c3b95f726b0a2db4b0a23b7b1cf - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h
a8c9b83169aceb5f97d9f7a411db449496dc18f6 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_generic.h
aafca30178f49676f640be9c6d34f623a3e3a9a4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/safe_generic.h
600ad8781585e87df49ab1aaa39a07c8e8de74f5 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h
0747ee16c7e6c726f568867d0fbbad411c8795c8 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h
2a76929dc6b0e8624d02002600bc454cc851dee4 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
1d6a239ed6c8dab1397f056a81ff456141ec7f9c - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/util_valist.h
31f2042e852f074970644903335af5ffa2b59c38 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h
65a237b66732aafe39bc4a14d87debd2b094fb83 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/map.h
c9e75f7b02241ededa5328a4f559e70dec60d159 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/type_safety.h
3924b67e6d63e9a15876331c695daaf679454b05 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/list.h
a28ab42de95e4878fb46e19d7b965c23f92b3213 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/btree.h
4cd6b110470da3aee29e999e096ca582104fab21 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/queue.h
1dacc1c1efc757c12e4c64eac171474a798b86fd - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/eheap_old.h
969cbac56935a80fafd7cceff157b27e623f9429 - NVIDIA-kernel-module-source-TempVersion/src/nvidia/inc/libraries/containers/multimap.h

Change-Id: I19565adc2503125a30a3ce9b8df155929548bcdb
This commit is contained in:
svcmobrel-release
2022-08-15 08:54:29 -07:00
parent 0872bd5b3b
commit 8ef68d7c1e
1166 changed files with 460314 additions and 0 deletions

View File

File diff suppressed because it is too large. [Load Diff]

View File

@@ -0,0 +1,81 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/backlight.h>
#include "os-interface.h"
#include "nv-linux.h"
/*
 * Query the current brightness of the Tegra backlight device that is
 * registered for this GPU (looked up by the name recorded in the
 * per-device Linux state).
 *
 * Returns NV_OK with *brightness filled in, NV_ERR_GENERIC when the
 * backlight device lookup fails, or NV_ERR_NOT_SUPPORTED when the
 * kernel does not export get_backlight_device_by_name().
 */
NV_STATUS NV_API_CALL nv_get_tegra_brightness_level
(
    nv_state_t *nv,
    NvU32 *brightness
)
{
#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct backlight_device *dev =
        get_backlight_device_by_name(nvl->backlight.device_name);

    if (dev == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
        return NV_ERR_GENERIC;
    }

    *brightness = dev->props.brightness;
    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*
 * Set the brightness of the Tegra backlight device registered for this
 * GPU and push the new value to the hardware via
 * backlight_update_status().
 *
 * Returns NV_OK on success, NV_ERR_GENERIC when the backlight device
 * lookup fails, or NV_ERR_NOT_SUPPORTED when the kernel does not
 * export get_backlight_device_by_name().
 */
NV_STATUS NV_API_CALL nv_set_tegra_brightness_level
(
    nv_state_t *nv,
    NvU32 brightness
)
{
#ifdef NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct backlight_device *dev =
        get_backlight_device_by_name(nvl->backlight.device_name);

    if (dev == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to get backlight device\n");
        return NV_ERR_GENERIC;
    }

    dev->props.brightness = brightness;
    backlight_update_status(dev);
    return NV_OK;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}

View File

@@ -0,0 +1,853 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-linux.h"
#include "nv-caps.h"
#include "nv-procfs.h"
#include "nv-hash.h"
extern int NVreg_ModifyDeviceFiles;
/* sys_close() or __close_fd() */
#include <linux/syscalls.h>
#define NV_CAP_DRV_MINOR_COUNT 8192
/* Hash table with 512 buckets */
#define NV_CAP_HASH_BITS 9
NV_DECLARE_HASHTABLE(g_nv_cap_hash_table, NV_CAP_HASH_BITS);
#define NV_CAP_HASH_SIZE NV_HASH_SIZE(g_nv_cap_hash_table)
#define nv_cap_hash_key(path) (nv_string_hash(path) % NV_CAP_HASH_SIZE)
/*
 * One capability path known at compile time: its canonical procfs-style
 * path, the device-file minor assigned during table init, and the hash
 * chain linkage used for path -> minor lookup.
 */
typedef struct nv_cap_table_entry
{
/* name must be the first element */
const char *name;
int minor;
struct hlist_node hlist;
} nv_cap_table_entry_t;
/* Element count of a statically sized capability table. */
#define NV_CAP_NUM_ENTRIES(_table) (sizeof(_table) / sizeof(_table[0]))
/* NVLink fabric-management capability. */
static nv_cap_table_entry_t g_nv_cap_nvlink_table[] =
{
{"/driver/nvidia-nvlink/capabilities/fabric-mgmt"}
};
/* Top-level MIG config/monitor capabilities. */
static nv_cap_table_entry_t g_nv_cap_mig_table[] =
{
{"/driver/nvidia/capabilities/mig/config"},
{"/driver/nvidia/capabilities/mig/monitor"}
};
/* System-wide capabilities; currently none are defined. */
static nv_cap_table_entry_t g_nv_cap_sys_table[] =
{
};
/*
 * Expands to the eight compute-instance "access" entries (ci0..ci7)
 * under one GPU instance. Must stay a pure initializer list: do not
 * insert anything between the continuation lines.
 */
#define NV_CAP_MIG_CI_ENTRIES(_gi) \
{_gi "/ci0/access"}, \
{_gi "/ci1/access"}, \
{_gi "/ci2/access"}, \
{_gi "/ci3/access"}, \
{_gi "/ci4/access"}, \
{_gi "/ci5/access"}, \
{_gi "/ci6/access"}, \
{_gi "/ci7/access"}
/*
 * Expands to the fifteen GPU-instance entries (gi0..gi14) of one GPU,
 * each followed by its compute-instance entries.
 */
#define NV_CAP_MIG_GI_ENTRIES(_gpu) \
{_gpu "/gi0/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi0"), \
{_gpu "/gi1/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi1"), \
{_gpu "/gi2/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi2"), \
{_gpu "/gi3/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi3"), \
{_gpu "/gi4/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi4"), \
{_gpu "/gi5/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi5"), \
{_gpu "/gi6/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi6"), \
{_gpu "/gi7/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi7"), \
{_gpu "/gi8/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi8"), \
{_gpu "/gi9/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi9"), \
{_gpu "/gi10/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi10"), \
{_gpu "/gi11/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi11"), \
{_gpu "/gi12/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi12"), \
{_gpu "/gi13/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi13"), \
{_gpu "/gi14/access"}, \
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi14")
/* Per-GPU MIG capability entries for up to 32 GPUs (gpu0..gpu31). */
static nv_cap_table_entry_t g_nv_cap_mig_gpu_table[] =
{
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu0/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu1/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu2/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu3/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu4/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu5/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu6/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu7/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu8/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu9/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu10/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu11/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu12/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu13/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu14/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu15/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu16/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu17/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu18/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu19/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu20/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu21/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu22/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu23/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu24/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu25/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu26/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu27/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu28/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu29/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu30/mig"),
NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu31/mig")
};
/*
 * Runtime state for one registered capability: its procfs location,
 * the backing device-file minor, and the device-file permission/modify
 * policy that user space reads back through the procfs entry.
 */
struct nv_cap
{
char *path;
char *name;
int minor;
int permissions;
int modify;
struct proc_dir_entry *parent;
struct proc_dir_entry *entry;
};
/* Size of the per-open write accumulation buffer (includes the NUL). */
#define NV_CAP_PROCFS_WRITE_BUF_SIZE 128
/*
 * Per-open-file state for a capability procfs entry: a snapshot of the
 * cap's attributes taken at open time, plus a buffer that accumulates
 * writes until the file is released (see nv_cap_procfs_release()).
 */
typedef struct nv_cap_file_private
{
int minor;
int permissions;
int modify;
char buffer[NV_CAP_PROCFS_WRITE_BUF_SIZE];
off_t offset;
} nv_cap_file_private_t;
/* Global char-device state for the nv-cap-drv device. */
struct
{
NvBool initialized;
struct cdev cdev;
dev_t devno;
} g_nv_cap_drv;
/* procfs directory holding the minor-number listing files. */
#define NV_CAP_PROCFS_DIR "driver/nvidia-caps"
/* Scratch-buffer size used when parsing capability path names. */
#define NV_CAP_NAME_BUF_SIZE 128
static struct proc_dir_entry *nv_cap_procfs_dir;
/*
 * seq_file show callback for "nvlink-minors": prints one
 * "<name> <minor>" line per NVLink capability table entry.
 * Always returns 0.
 */
static int nv_procfs_read_nvlink_minors(struct seq_file *s, void *v)
{
    int i, count;
    char name[NV_CAP_NAME_BUF_SIZE];

    count = NV_CAP_NUM_ENTRIES(g_nv_cap_nvlink_table);
    for (i = 0; i < count; i++)
    {
        /*
         * Bound the %s conversion to the destination capacity
         * (NV_CAP_NAME_BUF_SIZE - 1 chars + NUL). The previous
         * unbounded "%s" could overflow 'name' if a table path ever
         * grew past 127 characters.
         */
        if (sscanf(g_nv_cap_nvlink_table[i].name,
                   "/driver/nvidia-nvlink/capabilities/%127s", name) == 1)
        {
            name[sizeof(name) - 1] = '\0';
            seq_printf(s, "%s %d\n", name, g_nv_cap_nvlink_table[i].minor);
        }
    }

    return 0;
}
/*
 * seq_file show callback for "sys-minors": prints one "<name> <minor>"
 * line per system capability table entry (table is currently empty).
 * Always returns 0.
 */
static int nv_procfs_read_sys_minors(struct seq_file *s, void *v)
{
    int i, count;
    char name[NV_CAP_NAME_BUF_SIZE];

    count = NV_CAP_NUM_ENTRIES(g_nv_cap_sys_table);
    for (i = 0; i < count; i++)
    {
        /*
         * %127s bounds the conversion to the buffer capacity
         * (NV_CAP_NAME_BUF_SIZE - 1 chars + NUL); the previous
         * unbounded "%s" relied on every table path being short enough.
         */
        if (sscanf(g_nv_cap_sys_table[i].name,
                   "/driver/nvidia/capabilities/%127s", name) == 1)
        {
            name[sizeof(name) - 1] = '\0';
            seq_printf(s, "%s %d\n", name, g_nv_cap_sys_table[i].minor);
        }
    }

    return 0;
}
/*
 * seq_file show callback for "mig-minors": prints the top-level MIG
 * capability minors followed by the per-GPU MIG instance minors as
 * "gpu<N>/<name> <minor>" lines. Always returns 0.
 */
static int nv_procfs_read_mig_minors(struct seq_file *s, void *v)
{
    int i, count, gpu;
    char name[NV_CAP_NAME_BUF_SIZE];

    count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_table);
    for (i = 0; i < count; i++)
    {
        /*
         * %127s bounds each conversion to the buffer capacity
         * (NV_CAP_NAME_BUF_SIZE - 1 chars + NUL). The previous
         * unbounded "%s" could overflow 'name' if a table path ever
         * exceeded 127 characters.
         */
        if (sscanf(g_nv_cap_mig_table[i].name,
                   "/driver/nvidia/capabilities/mig/%127s", name) == 1)
        {
            name[sizeof(name) - 1] = '\0';
            seq_printf(s, "%s %d\n", name, g_nv_cap_mig_table[i].minor);
        }
    }

    count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_gpu_table);
    for (i = 0; i < count; i++)
    {
        if (sscanf(g_nv_cap_mig_gpu_table[i].name,
                   "/driver/nvidia/capabilities/gpu%d/mig/%127s",
                   &gpu, name) == 2)
        {
            name[sizeof(name) - 1] = '\0';
            seq_printf(s, "gpu%d/%s %d\n",
                       gpu, name, g_nv_cap_mig_gpu_table[i].minor);
        }
    }

    return 0;
}
/*
 * Generate the seq_file open/release plumbing for the three read-only
 * listing files, each guarded by nv_system_pm_lock.
 */
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(nvlink_minors, nv_system_pm_lock);
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(mig_minors, nv_system_pm_lock);
NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(sys_minors, nv_system_pm_lock);
/*
 * Tear down the /proc/driver/nvidia-caps directory and everything
 * registered beneath it. Safe to call when init never ran or already
 * failed: a NULL directory pointer makes this a no-op.
 */
static void nv_cap_procfs_exit(void)
{
    if (nv_cap_procfs_dir != NULL)
    {
        nv_procfs_unregister_all(nv_cap_procfs_dir, nv_cap_procfs_dir);
        nv_cap_procfs_dir = NULL;
    }
}
/*
 * Create /proc/driver/nvidia-caps and the three minor-number listing
 * files beneath it. Returns 0 on success, -EACCES on any failure;
 * partially created entries are unwound via nv_cap_procfs_exit().
 */
int nv_cap_procfs_init(void)
{
    /*
     * Previously declared 'static' for no reason: this is a pure
     * scratch variable, so give it automatic storage instead of
     * keeping a stale pointer alive between calls.
     */
    struct proc_dir_entry *file_entry;

    nv_cap_procfs_dir = NV_CREATE_PROC_DIR(NV_CAP_PROCFS_DIR, NULL);
    if (nv_cap_procfs_dir == NULL)
    {
        return -EACCES;
    }

    file_entry = NV_CREATE_PROC_FILE("mig-minors", nv_cap_procfs_dir,
                                     mig_minors, NULL);
    if (file_entry == NULL)
    {
        goto cleanup;
    }

    file_entry = NV_CREATE_PROC_FILE("nvlink-minors", nv_cap_procfs_dir,
                                     nvlink_minors, NULL);
    if (file_entry == NULL)
    {
        goto cleanup;
    }

    file_entry = NV_CREATE_PROC_FILE("sys-minors", nv_cap_procfs_dir,
                                     sys_minors, NULL);
    if (file_entry == NULL)
    {
        goto cleanup;
    }

    return 0;

cleanup:
    nv_cap_procfs_exit();
    return -EACCES;
}
/*
 * Look up the device-file minor assigned to a capability path.
 * Hashes the path, walks the matching bucket, and compares full paths
 * to resolve collisions. Returns the minor, or -1 if the path is not
 * in any capability table.
 */
static int nv_cap_find_minor(char *path)
{
    nv_cap_table_entry_t *entry;
    unsigned int key = nv_cap_hash_key(path);

    nv_hash_for_each_possible(g_nv_cap_hash_table, entry, hlist, key)
    {
        if (strcmp(entry->name, path) == 0)
        {
            return entry->minor;
        }
    }

    return -1;
}
/*
 * Assign device-file minors to every entry of one capability table and
 * insert the entries into the global path hash table. Minors are
 * allocated from a single sequence shared by all tables (the static
 * counter persists across calls); WARN_ON fires if the sequence ever
 * exceeds the minor range reserved for nv-cap-drv.
 */
static void _nv_cap_table_init(nv_cap_table_entry_t *table, int count)
{
    static int minor = 0;
    int idx;

    for (idx = 0; idx < count; idx++)
    {
        unsigned int key;

        table[idx].minor = minor++;
        INIT_HLIST_NODE(&table[idx].hlist);
        key = nv_cap_hash_key(table[idx].name);
        nv_hash_add(g_nv_cap_hash_table, &table[idx].hlist, key);
    }

    WARN_ON(minor > NV_CAP_DRV_MINOR_COUNT);
}
/* Convenience wrapper: initialize a table using its static size. */
#define nv_cap_table_init(table) \
_nv_cap_table_init(table, NV_CAP_NUM_ENTRIES(table))
/*
 * One-time setup of the capability hash table: verifies the layout
 * assumption that 'name' is the first member of nv_cap_table_entry_t,
 * then registers every static table. The registration order fixes the
 * minor-number assignment (see the shared counter in
 * _nv_cap_table_init()).
 */
static void nv_cap_tables_init(void)
{
BUILD_BUG_ON(offsetof(nv_cap_table_entry_t, name) != 0);
nv_hash_init(g_nv_cap_hash_table);
nv_cap_table_init(g_nv_cap_nvlink_table);
nv_cap_table_init(g_nv_cap_mig_table);
nv_cap_table_init(g_nv_cap_mig_gpu_table);
nv_cap_table_init(g_nv_cap_sys_table);
}
/*
 * Write handler for a capability procfs entry. Appends user data to the
 * per-open accumulation buffer; nothing is parsed here — the buffer is
 * interpreted when the file is released (nv_cap_procfs_release()).
 * Returns the number of bytes consumed, -EINVAL for an empty write, or
 * -ENOSPC once the fixed-size buffer is full.
 */
static ssize_t nv_cap_procfs_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *pos)
{
nv_cap_file_private_t *private = NULL;
unsigned long bytes_left;
char *proc_buffer;
/* single_open() stored our private data inside the seq_file. */
private = ((struct seq_file *)file->private_data)->private;
/* Reserve one byte for the NUL terminator added below. */
bytes_left = (sizeof(private->buffer) - private->offset - 1);
if (count == 0)
{
return -EINVAL;
}
if ((bytes_left == 0) || (count > bytes_left))
{
return -ENOSPC;
}
proc_buffer = &private->buffer[private->offset];
if (copy_from_user(proc_buffer, buffer, count))
{
nv_printf(NV_DBG_ERRORS, "nv-caps: failed to copy in proc data!\n");
return -EFAULT;
}
/* Advance the append offset and keep the buffer NUL-terminated. */
private->offset += count;
proc_buffer[count] = '\0';
*pos = private->offset;
return count;
}
/*
 * seq_file show callback for a capability procfs entry: reports the
 * capability's device-file minor, mode, and modify flag, one
 * "key: value" pair per line. Always returns 0.
 */
static int nv_cap_procfs_read(struct seq_file *s, void *v)
{
    nv_cap_file_private_t *priv = s->private;

    seq_printf(s, "%s: %d\n", "DeviceFileMinor", priv->minor);
    seq_printf(s, "%s: %d\n", "DeviceFileMode", priv->permissions);
    seq_printf(s, "%s: %d\n", "DeviceFileModify", priv->modify);

    return 0;
}
/*
 * Open handler for a capability procfs entry. Snapshots the cap's
 * attributes into freshly allocated per-open state, wires it into the
 * seq_file via single_open(), and acquires nv_system_pm_lock for read.
 * The lock is held for the lifetime of the open file and dropped in
 * nv_cap_procfs_release(). Returns 0 on success or a negative errno;
 * on any failure all partial setup is undone here.
 */
static int nv_cap_procfs_open(struct inode *inode, struct file *file)
{
nv_cap_file_private_t *private = NULL;
int rc;
nv_cap_t *cap = NV_PDE_DATA(inode);
NV_KMALLOC(private, sizeof(nv_cap_file_private_t));
if (private == NULL)
{
return -ENOMEM;
}
/* Snapshot the capability's current attributes for this open. */
private->minor = cap->minor;
private->permissions = cap->permissions;
private->offset = 0;
private->modify = cap->modify;
rc = single_open(file, nv_cap_procfs_read, private);
if (rc < 0)
{
NV_KFREE(private, sizeof(nv_cap_file_private_t));
return rc;
}
rc = nv_down_read_interruptible(&nv_system_pm_lock);
if (rc < 0)
{
/* Interrupted while waiting: undo single_open() and the alloc. */
single_release(inode, file);
NV_KFREE(private, sizeof(nv_cap_file_private_t));
}
return rc;
}
/*
 * Release handler for a capability procfs entry. Drops the PM read
 * lock taken in nv_cap_procfs_open(), tears down the seq_file, and —
 * if anything was written during this open — parses the accumulated
 * buffer for a "DeviceFileModify: <n>" directive and applies it to the
 * capability. Always returns 0.
 */
static int nv_cap_procfs_release(struct inode *inode, struct file *file)
{
struct seq_file *s = file->private_data;
nv_cap_file_private_t *private = NULL;
char *buffer;
int modify;
nv_cap_t *cap = NV_PDE_DATA(inode);
if (s != NULL)
{
private = s->private;
}
up_read(&nv_system_pm_lock);
/* Grab 'private' before single_release() frees the seq_file. */
single_release(inode, file);
if (private != NULL)
{
buffer = private->buffer;
/* offset != 0 means user space wrote something this open. */
if (private->offset != 0)
{
if (sscanf(buffer, "DeviceFileModify: %d", &modify) == 1)
{
cap->modify = modify;
}
}
NV_KFREE(private, sizeof(nv_cap_file_private_t));
}
/*
* All open files using the proc entry will be invalidated
* if the entry is removed.
*/
file->private_data = NULL;
return 0;
}
/*
 * proc file operations for capability entries.  The NV_PROC_OPS_* macros
 * abstract over the struct file_operations vs. struct proc_ops split
 * introduced in kernel v5.6; seq_read/seq_lseek provide the read side.
 */
static nv_proc_ops_t g_nv_cap_procfs_fops = {
    NV_PROC_OPS_SET_OWNER()
    .NV_PROC_OPS_OPEN = nv_cap_procfs_open,
    .NV_PROC_OPS_RELEASE = nv_cap_procfs_release,
    .NV_PROC_OPS_WRITE = nv_cap_procfs_write,
    .NV_PROC_OPS_READ = seq_read,
    .NV_PROC_OPS_LSEEK = seq_lseek,
};
/* forward declaration of g_nv_cap_drv_fops */
static struct file_operations g_nv_cap_drv_fops;

/*
 * Validate that 'fd' refers to the nv-caps character device with the minor
 * number expected by 'cap'; if so, duplicate it into a new close-on-exec
 * descriptor.
 *
 * Returns the new fd on success, -1 on any failure.  On success the new
 * descriptor takes ownership of the extra reference obtained via fget();
 * on failure that reference is dropped via fput().
 */
int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd)
{
    struct file *file;
    int dup_fd;
    struct inode *inode = NULL;
    dev_t rdev = 0;
    struct files_struct *files = current->files;
    struct fdtable *fdt;

    if (cap == NULL)
    {
        return -1;
    }

    file = fget(fd);
    if (file == NULL)
    {
        return -1;
    }

    inode = NV_FILE_INODE(file);
    if (inode == NULL)
    {
        goto err;
    }

    /* Make sure the fd belongs to the nv-cap-drv */
    if (file->f_op != &g_nv_cap_drv_fops)
    {
        goto err;
    }

    /* Make sure the fd has the expected capability */
    rdev = inode->i_rdev;
    if (MINOR(rdev) != cap->minor)
    {
        goto err;
    }

    dup_fd = NV_GET_UNUSED_FD_FLAGS(O_CLOEXEC);
    if (dup_fd < 0)
    {
        /* NOTE(review): fallback path — presumably for kernels where
         * NV_GET_UNUSED_FD_FLAGS is unavailable; confirm against conftest. */
        dup_fd = NV_GET_UNUSED_FD();
        if (dup_fd < 0)
        {
            goto err;
        }
        /*
         * Set CLOEXEC before installing the FD.
         *
         * If fork() happens in between, the opened unused FD will have
         * a NULL struct file associated with it, which is okay.
         *
         * The only well known bug here is the race with dup(2), which is
         * already documented in the kernel, see fd_install()'s description.
         */
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        NV_SET_CLOSE_ON_EXEC(dup_fd, fdt);
        spin_unlock(&files->file_lock);
    }

    fd_install(dup_fd, file);
    return dup_fd;

err:
    fput(file);
    return -1;
}
/*
 * Close a file descriptor previously produced by
 * nv_cap_validate_and_dup_fd().  Passing fd == -1 is a no-op.
 */
void NV_API_CALL nv_cap_close_fd(int fd)
{
    if (fd == -1)
    {
        return;
    }
    /*
     * Acquire task_lock as we access current->files explicitly (__close_fd)
     * and implicitly (sys_close), and it will race with the exit path.
     */
    task_lock(current);
    /* Nothing to do, we are in exit path */
    if (current->files == NULL)
    {
        task_unlock(current);
        return;
    }
    /*
     * From v4.17-rc1 (to v5.10.8) kernels have stopped exporting sys_close(fd)
     * and started exporting __close_fd, as of this commit:
     * 2018-04-02 2ca2a09d6215 ("fs: add ksys_close() wrapper; remove in-kernel
     * calls to sys_close()")
     * Kernels v5.11-rc1 onwards have stopped exporting __close_fd, and started
     * exporting close_fd, as of this commit:
     * 2020-12-20 8760c909f54a ("file: Rename __close_fd to close_fd and remove
     * the files parameter")
     */
#if NV_IS_EXPORT_SYMBOL_PRESENT_close_fd
    close_fd(fd);
#elif NV_IS_EXPORT_SYMBOL_PRESENT___close_fd
    __close_fd(current->files, fd);
#else
    sys_close(fd);
#endif
    task_unlock(current);
}
/*
 * Allocate an nv_cap_t whose path is "<parent path>/<name>".
 *
 * Initializes path, name, minor (-1) and modify; the caller is responsible
 * for permissions, parent and entry.  Returns NULL on invalid arguments or
 * allocation failure (all partial allocations are released).
 */
static nv_cap_t* nv_cap_alloc(nv_cap_t *parent_cap, const char *name)
{
    nv_cap_t *cap;
    int len;

    if (parent_cap == NULL || name == NULL)
    {
        return NULL;
    }

    NV_KMALLOC(cap, sizeof(nv_cap_t));
    if (cap == NULL)
    {
        return NULL;
    }

    /* "<parent>/<name>": both strings, the '/' separator, and the NUL. */
    len = strlen(name) + strlen(parent_cap->path) + 2;
    NV_KMALLOC(cap->path, len);
    if (cap->path == NULL)
    {
        NV_KFREE(cap, sizeof(nv_cap_t));
        return NULL;
    }

    /* One bounded write instead of the previous strcpy/strcat chain. */
    snprintf(cap->path, len, "%s/%s", parent_cap->path, name);

    len = strlen(name) + 1;
    NV_KMALLOC(cap->name, len);
    if (cap->name == NULL)
    {
        NV_KFREE(cap->path, strlen(cap->path) + 1);
        NV_KFREE(cap, sizeof(nv_cap_t));
        return NULL;
    }
    strcpy(cap->name, name);

    cap->minor = -1;
    cap->modify = NVreg_ModifyDeviceFiles;

    return cap;
}
/* Release an nv_cap_t together with the strings nv_cap_alloc() created. */
static void nv_cap_free(nv_cap_t *cap)
{
    if (cap != NULL)
    {
        NV_KFREE(cap->path, strlen(cap->path) + 1);
        NV_KFREE(cap->name, strlen(cap->name) + 1);
        NV_KFREE(cap, sizeof(nv_cap_t));
    }
}
/*
 * Create a proc file entry representing a capability and assign it a
 * device-file minor number.  'mode' records the intended device-file
 * permissions; the proc entry itself is a world-readable regular file.
 * Returns the new capability, or NULL on failure.
 */
nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap,
                                               const char *name, int mode)
{
    int minor;
    nv_cap_t *cap = nv_cap_alloc(parent_cap, name);

    if (cap == NULL)
    {
        return NULL;
    }

    cap->parent = parent_cap->entry;
    cap->permissions = mode;

    minor = nv_cap_find_minor(cap->path);
    if (minor < 0)
    {
        nv_cap_free(cap);
        return NULL;
    }
    cap->minor = minor;

    cap->entry = proc_create_data(name, (S_IFREG | S_IRUGO), parent_cap->entry,
                                  &g_nv_cap_procfs_fops, (void*)cap);
    if (cap->entry == NULL)
    {
        nv_cap_free(cap);
        return NULL;
    }

    return cap;
}
/*
 * Create a proc directory entry representing a capability.  Directories
 * have no backing device file, so no minor number is assigned.  Returns
 * the new capability, or NULL on failure.
 */
nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap,
                                              const char *name, int mode)
{
    nv_cap_t *cap = nv_cap_alloc(parent_cap, name);

    if (cap == NULL)
    {
        return NULL;
    }

    cap->parent = parent_cap->entry;
    cap->permissions = mode;
    cap->minor = -1;

    /* The proc entry itself is a world-readable, world-searchable dir. */
    cap->entry = NV_PROC_MKDIR_MODE(name, (S_IFDIR | S_IRUGO | S_IXUGO),
                                    parent_cap->entry);
    if (cap->entry == NULL)
    {
        nv_cap_free(cap);
        return NULL;
    }

    return cap;
}
/*
 * Create the top-level "<path>/capabilities" proc directory and return the
 * capability object representing it, or NULL on failure.
 */
nv_cap_t* NV_API_CALL nv_cap_init(const char *path)
{
    nv_cap_t parent_cap;
    nv_cap_t *cap;
    char *name = NULL;
    char dir[] = "/capabilities";

    if (path == NULL)
    {
        return NULL;
    }

    NV_KMALLOC(name, (strlen(path) + strlen(dir)) + 1);
    if (name == NULL)
    {
        return NULL;
    }

    strcpy(name, path);
    strcat(name, dir);

    /* Synthesize an empty root parent so nv_cap_create_dir_entry() can be
     * reused for the top-level directory. */
    parent_cap.entry = NULL;
    parent_cap.path = "";
    parent_cap.name = "";

    cap = nv_cap_create_dir_entry(&parent_cap, name, S_IRUGO | S_IXUGO);

    NV_KFREE(name, strlen(name) + 1);
    return cap;
}
/* Remove a capability's proc entry and free the capability object. */
void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap)
{
    if (!WARN_ON(cap == NULL))
    {
        remove_proc_entry(cap->name, cap->parent);
        nv_cap_free(cap);
    }
}
/* Character-device open: no per-open state is required. */
static int nv_cap_drv_open(struct inode *inode, struct file *file)
{
    return 0;
}
/* Character-device release: nothing to tear down. */
static int nv_cap_drv_release(struct inode *inode, struct file *file)
{
    return 0;
}
/*
 * File operations for the nvidia-caps character device.  Only open/release
 * are provided; the device exists so capability fds can be identified by
 * f_op in nv_cap_validate_and_dup_fd().
 */
static struct file_operations g_nv_cap_drv_fops =
{
    .owner = THIS_MODULE,
    .open = nv_cap_drv_open,
    .release = nv_cap_drv_release
};
/*
 * Register the nvidia-caps character device and its proc hierarchy.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: the "already initialized" check is now performed BEFORE
 * nv_cap_tables_init().  Previously a second call re-initialized the
 * capability minor tables while the driver was live (and then returned
 * -EBUSY), clobbering tables that may be in use.
 */
int NV_API_CALL nv_cap_drv_init(void)
{
    int rc;

    if (g_nv_cap_drv.initialized)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv is already initialized.\n");
        return -EBUSY;
    }

    nv_cap_tables_init();

    rc = alloc_chrdev_region(&g_nv_cap_drv.devno,
                             0,
                             NV_CAP_DRV_MINOR_COUNT,
                             "nvidia-caps");
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev region.\n");
        return rc;
    }

    cdev_init(&g_nv_cap_drv.cdev, &g_nv_cap_drv_fops);
    g_nv_cap_drv.cdev.owner = THIS_MODULE;

    rc = cdev_add(&g_nv_cap_drv.cdev, g_nv_cap_drv.devno,
                  NV_CAP_DRV_MINOR_COUNT);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev.\n");
        goto cdev_add_fail;
    }

    rc = nv_cap_procfs_init();
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "nv-caps-drv: unable to init proc\n");
        goto proc_init_fail;
    }

    g_nv_cap_drv.initialized = NV_TRUE;

    return 0;

    /* Unwind in reverse order of acquisition. */
proc_init_fail:
    cdev_del(&g_nv_cap_drv.cdev);

cdev_add_fail:
    unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);

    return rc;
}
/* Tear down nv_cap_drv_init() state; safe to call when never initialized. */
void NV_API_CALL nv_cap_drv_exit(void)
{
    if (g_nv_cap_drv.initialized)
    {
        /* Reverse order of nv_cap_drv_init(). */
        nv_cap_procfs_exit();
        cdev_del(&g_nv_cap_drv.cdev);
        unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT);
        g_nv_cap_drv.initialized = NV_FALSE;
    }
}

View File

@@ -0,0 +1,630 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*!
 * @brief The below defined static const array points to the
 * clock mentioned in enum defined in below file.
 *
 * arch/nvalloc/unix/include/nv.h
 * enum TEGRASOC_WHICH_CLK
 *
 * The order should be maintained/updated together.
 *
 * Designated initializers are used so each entry stays bound to its enum
 * value even if the list is reordered; nv_clk_get_handles() matches these
 * names against the ids reported by devm_clk_bulk_get_all().
 */
static const char *osMapClk[] = {
    [TEGRASOC_WHICH_CLK_NVDISPLAYHUB] = "nvdisplayhub_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_DISP] = "nvdisplay_disp_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P0] = "nvdisplay_p0_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P1] = "nvdisplay_p1_clk",
    [TEGRASOC_WHICH_CLK_DPAUX0] = "dpaux0_clk",
    [TEGRASOC_WHICH_CLK_FUSE] = "fuse_clk",
    [TEGRASOC_WHICH_CLK_DSIPLL_VCO] = "dsipll_vco_clk",
    [TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN] = "dsipll_clkoutpn_clk",
    [TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA] = "dsipll_clkouta_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_VCO] = "sppll0_vco_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN] = "sppll0_clkoutpn_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA] = "sppll0_clkouta_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB] = "sppll0_clkoutb_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_DIV10] = "sppll0_div10_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_DIV25] = "sppll0_div25_clk",
    [TEGRASOC_WHICH_CLK_SPPLL0_DIV27] = "sppll0_div27_clk",
    [TEGRASOC_WHICH_CLK_SPPLL1_VCO] = "sppll1_vco_clk",
    [TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN] = "sppll1_clkoutpn_clk",
    [TEGRASOC_WHICH_CLK_SPPLL1_DIV27] = "sppll1_div27_clk",
    [TEGRASOC_WHICH_CLK_VPLL0_REF] = "vpll0_ref_clk",
    [TEGRASOC_WHICH_CLK_VPLL0] = "vpll0_clk",
    [TEGRASOC_WHICH_CLK_VPLL1] = "vpll1_clk",
    [TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF] = "nvdisplay_p0_ref_clk",
    [TEGRASOC_WHICH_CLK_RG0] = "rg0_clk",
    [TEGRASOC_WHICH_CLK_RG1] = "rg1_clk",
    [TEGRASOC_WHICH_CLK_DISPPLL] = "disppll_clk",
    [TEGRASOC_WHICH_CLK_DISPHUBPLL] = "disphubpll_clk",
    [TEGRASOC_WHICH_CLK_DSI_LP] = "dsi_lp_clk",
    [TEGRASOC_WHICH_CLK_DSI_CORE] = "dsi_core_clk",
    [TEGRASOC_WHICH_CLK_DSI_PIXEL] = "dsi_pixel_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR0] = "pre_sor0_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR1] = "pre_sor1_clk",
    [TEGRASOC_WHICH_CLK_DP_LINK_REF] = "dp_link_ref_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT] = "sor_linka_input_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO] = "sor_linka_afifo_clk",
    [TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M] = "sor_linka_afifo_m_clk",
    [TEGRASOC_WHICH_CLK_RG0_M] = "rg0_m_clk",
    [TEGRASOC_WHICH_CLK_RG1_M] = "rg1_m_clk",
    [TEGRASOC_WHICH_CLK_SOR0_M] = "sor0_m_clk",
    [TEGRASOC_WHICH_CLK_SOR1_M] = "sor1_m_clk",
    [TEGRASOC_WHICH_CLK_PLLHUB] = "pllhub_clk",
    [TEGRASOC_WHICH_CLK_SOR0] = "sor0_clk",
    [TEGRASOC_WHICH_CLK_SOR1] = "sor1_clk",
    [TEGRASOC_WHICH_CLK_SOR_PAD_INPUT] = "sor_pad_input_clk",
    [TEGRASOC_WHICH_CLK_PRE_SF0] = "pre_sf0_clk",
    [TEGRASOC_WHICH_CLK_SF0] = "sf0_clk",
    [TEGRASOC_WHICH_CLK_SF1] = "sf1_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR0_REF] = "pre_sor0_ref_clk",
    [TEGRASOC_WHICH_CLK_PRE_SOR1_REF] = "pre_sor1_ref_clk",
    [TEGRASOC_WHICH_CLK_SOR0_PLL_REF] = "sor0_ref_pll_clk",
    [TEGRASOC_WHICH_CLK_SOR1_PLL_REF] = "sor1_ref_pll_clk",
    [TEGRASOC_WHICH_CLK_SOR0_REF] = "sor0_ref_clk",
    [TEGRASOC_WHICH_CLK_SOR1_REF] = "sor1_ref_clk",
    [TEGRASOC_WHICH_CLK_DSI_PAD_INPUT] = "dsi_pad_input_clk",
    [TEGRASOC_WHICH_CLK_OSC] = "osc_clk",
    [TEGRASOC_WHICH_CLK_DSC] = "dsc_clk",
    [TEGRASOC_WHICH_CLK_MAUD] = "maud_clk",
    [TEGRASOC_WHICH_CLK_AZA_2XBIT] = "aza_2xbit_clk",
    [TEGRASOC_WHICH_CLK_AZA_BIT] = "aza_bit_clk",
    [TEGRASOC_WHICH_CLK_MIPI_CAL] = "mipi_cal_clk",
    [TEGRASOC_WHICH_CLK_UART_FST_MIPI_CAL] = "uart_fst_mipi_cal_clk",
    [TEGRASOC_WHICH_CLK_SOR0_DIV] = "sor0_div_clk",
};
/*!
* @brief Get the clock handles.
*
* Look up and obtain the clock handles for each display
* clock at boot-time and later using all those handles
* for rest of the operations. for example, enable/disable
* clocks, get current/max frequency of the clock.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_clk_get_handles(
    nv_state_t *nv)
{
    NV_STATUS status = NV_OK;
#if defined(NV_DEVM_CLK_BULK_GET_ALL_PRESENT)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk_bulk_data *clks;
    int clk_count;
    int i;
    NvU32 j;

    /*
     * Fix: devm_clk_bulk_get_all() returns the number of clocks obtained
     * OR a negative errno.  The previous code stored the result in an
     * NvU32 and only tested "== 0", so a negative error became a huge
     * unsigned count and the loop below walked wild memory.  Keep the
     * result signed and treat anything <= 0 as failure.
     */
    clk_count = devm_clk_bulk_get_all(nvl->dev, &clks);
    if (clk_count <= 0)
    {
        nv_printf(NV_DBG_ERRORS,"NVRM: nv_clk_get_handles, failed to get clk handles from devm_clk_bulk_get_all\n");
        return NV_ERR_OBJECT_NOT_FOUND;
    }
    //
    // TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
    // arch/nvalloc/unix/include/nv.h
    // enum TEGRASOC_WHICH_CLK
    //
    for (i = 0; i < clk_count; i++)
    {
        for (j = 0U; j < TEGRASOC_WHICH_CLK_MAX; j++)
        {
            if (!strcmp(osMapClk[j], clks[i].id))
            {
                nvl->disp_clk_handles.clk[j].handles = clks[i].clk;
                nvl->disp_clk_handles.clk[j].clkName = __clk_get_name(clks[i].clk);
                break;
            }
        }
        /* Inner loop ran to completion: no osMapClk entry matched this id. */
        if (j == TEGRASOC_WHICH_CLK_MAX)
        {
            nv_printf(NV_DBG_ERRORS,"NVRM: nv_clk_get_handles, failed to find TEGRA_SOC_WHICH_CLK for %s\n", clks[i].id);
            return NV_ERR_OBJECT_NOT_FOUND;
        }
    }
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: devm_clk_bulk_get_all API is not present\n");
    status = NV_ERR_OBJECT_NOT_FOUND;
#endif
    return status;
}
/*!
* @brief Clear the clock handles assigned by nv_clk_get_handles()
*
* Clear the clock handle for each display of the clocks at shutdown-time.
* Since clock handles are obtained by devm managed devm_clk_bulk_get_all()
* API, devm_clk_bulk_release_all() API is called on all the enumerated
* clk handles automatically when module gets unloaded. Hence, no need
* to explicitly free those handles.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
*/
void NV_API_CALL nv_clk_clear_handles(
    nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU32 i;
    //
    // TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
    // arch/nvalloc/unix/include/nv.h
    // enum TEGRASOC_WHICH_CLK
    //
    // Writing NULL unconditionally is equivalent to the previous
    // "test then clear" form (NULL over NULL is harmless); the redundant
    // branch only obscured the intent.
    //
    for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
    {
        nvl->disp_clk_handles.clk[i].handles = NULL;
    }
}
/*!
* @brief Enable the clock.
*
* Enabling the clock before performing any operation
* on it. The below function will prepare the clock for use
* and enable them.
*
* for more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_enable_clk(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->disp_clk_handles.clk[whichClkOS].handles;
    int ret;

    /* Guard clause: no handle was obtained for this clock. */
    if (handle == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    ret = clk_prepare_enable(handle);
    if (ret != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clk_prepare_enable failed with error: %d\n", ret);
        return NV_ERR_FEATURE_NOT_ENABLED;
    }

    return NV_OK;
}
/*!
* @brief Check if clock is enable or not.
*
* Checking the clock status if it is enabled or not before
* enabling or disabling it.
*
* for more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
*
* @returns clock status.
*/
NvBool NV_API_CALL nv_is_clk_enabled(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->disp_clk_handles.clk[whichClkOS].handles;

    if (handle == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clock handle requested not found.\n");
        return NV_FALSE;
    }

    return __clk_is_enabled(handle) ? NV_TRUE : NV_FALSE;
}
/*!
* @brief Disable the clock.
*
* Disabling the clock after performing operation or required
* work with that clock is done with that particular clock.
* The below function will unprepare the clock for further use
* and disable them.
*
* Note: make sure to disable clock before clk_put is called.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
*/
void NV_API_CALL nv_disable_clk(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    /*
     * Every other accessor in this file validates the handle before use;
     * do the same here so a clock that was never obtained (or was already
     * cleared) does not silently reach clk_disable_unprepare().
     */
    if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
    {
        clk_disable_unprepare(nvl->disp_clk_handles.clk[whichClkOS].handles);
    }
    else
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clock handle requested not found.\n");
    }
}
/*!
* @brief Get current clock frequency.
*
* Obtain the current clock rate for a clock source.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[out] pCurrFreqKHz Current clock frequency
*/
NV_STATUS NV_API_CALL nv_get_curr_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pCurrFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->disp_clk_handles.clk[whichClkOS].handles;
    unsigned long rateHz;

    if (handle == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    /* clk_get_rate() reports Hz; callers expect KHz. */
    rateHz = clk_get_rate(handle);
    *pCurrFreqKHz = rateHz / 1000U;

    return (*pCurrFreqKHz > 0U) ? NV_OK : NV_ERR_FEATURE_NOT_ENABLED;
}
/*!
* @brief Get maximum clock frequency.
*
* Obtain the maximum clock rate a clock source can provide.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[out] pMaxFreqKHz Maximum clock frequency
*/
NV_STATUS NV_API_CALL nv_get_max_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMaxFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status = NV_ERR_GENERIC;
    long ret;
    if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
    {
        //
        // clk_round_rate(struct clk *clk, rate);
        // rate is the maximum possible rate we give,
        // it returns rounded clock rate in Hz, i.e.,
        // maximum clock rate the source clock can
        // support or negative errno.
        // Here, rate = NV_U32_MAX (matching the actual argument below;
        // an earlier revision of this comment said NV_S64_MAX).
        // 0 < currFreq < maxFreq <= NV_U32_MAX
        // clk_round_rate() rounds off and returns the
        // nearest freq the clock can provide, so
        // sending NV_U32_MAX will return maxFreq.
        //
        ret = clk_round_rate(nvl->disp_clk_handles.clk[whichClkOS].handles, NV_U32_MAX);
        if (ret >= 0)
        {
            /* Convert Hz to KHz for the caller. */
            *pMaxFreqKHz = (NvU32) (ret / 1000);
            status = NV_OK;
        }
        else
        {
            status = NV_ERR_FEATURE_NOT_ENABLED;
            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
        }
    }
    else
    {
        status = NV_ERR_OBJECT_NOT_FOUND;
    }
    return status;
}
/*!
* @brief Get minimum clock frequency.
*
* Obtain the minimum clock rate a clock source can provide.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[out] pMinFreqKHz Minimum clock frequency
*/
NV_STATUS NV_API_CALL nv_get_min_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 *pMinFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status = NV_ERR_GENERIC;
    long ret;
    if (nvl->disp_clk_handles.clk[whichClkOS].handles != NULL)
    {
        //
        // clk_round_rate(struct clk *clk, rate);
        // rate is the minimum possible rate we give,
        // it returns rounded clock rate in Hz, i.e.,
        // minimum clock rate the source clock can
        // support or negative errno.
        // Here, rate = 0 (matching the actual argument below; an earlier
        // revision of this comment wrongly said NV_S64_MAX).
        // 0 <= minFreq < currFreq < maxFreq
        // clk_round_rate() rounds off and returns the
        // nearest freq the clock can provide, so
        // sending 0 will return minFreq.
        //
        ret = clk_round_rate(nvl->disp_clk_handles.clk[whichClkOS].handles, 0);
        if (ret >= 0)
        {
            /* Convert Hz to KHz for the caller. */
            *pMinFreqKHz = (NvU32) (ret / 1000);
            status = NV_OK;
        }
        else
        {
            status = NV_ERR_FEATURE_NOT_ENABLED;
            nv_printf(NV_DBG_ERRORS, "NVRM: clk_round_rate failed with error: %ld\n", ret);
        }
    }
    else
    {
        status = NV_ERR_OBJECT_NOT_FOUND;
    }
    return status;
}
/*!
* @brief set clock frequency.
*
* Setting the frequency of clock source.
* This is only valid once the clock source has been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOS Enum value of the target clock
* @param[in] reqFreqKHz Required frequency
*/
NV_STATUS NV_API_CALL nv_set_freq(
    nv_state_t *nv, TEGRASOC_WHICH_CLK whichClkOS, NvU32 reqFreqKHz)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *handle = nvl->disp_clk_handles.clk[whichClkOS].handles;
    int ret;

    if (handle == NULL)
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    /* clk_set_rate() takes Hz; the caller passes KHz. */
    ret = clk_set_rate(handle, reqFreqKHz * 1000U);
    if (ret != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_rate failed with error: %d\n", ret);
        return NV_ERR_INVALID_REQUEST;
    }

    return NV_OK;
}
/*!
* @brief set parent clock.
*
* Setting the parent clock of clock source.
* This is only valid once the clock source and the parent
* clock have been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOSsource Enum value of the source clock
* @param[in] whichClkOSparent Enum value of the parent clock
*/
NV_STATUS NV_API_CALL nv_set_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK whichClkOSparent
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *source = nvl->disp_clk_handles.clk[whichClkOSsource].handles;
    struct clk *parent = nvl->disp_clk_handles.clk[whichClkOSparent].handles;
    int ret;

    /* Both handles must have been obtained at boot. */
    if ((source == NULL) || (parent == NULL))
    {
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    ret = clk_set_parent(source, parent);
    if (ret != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clk_set_parent failed with error: %d\n", ret);
        return NV_ERR_INVALID_REQUEST;
    }

    return NV_OK;
}
/*!
* @brief get parent clock.
*
* Getting the parent clock of clock source.
* This is only valid once the clock source and the parent
* clock have been enabled.
*
* For more details on CCF functions, please check below file:
*
* In the Linux kernel: include/linux/clk.h
* or
* https://www.kernel.org/doc/htmldocs/kernel-api/
*
* @param[in] nv Per gpu linux state
* @param[in] whichClkOSsource Enum value of the source clock
* @param[in] pWhichClkOSparent Enum value of the parent clock
*/
NV_STATUS NV_API_CALL nv_get_parent
(
    nv_state_t *nv,
    TEGRASOC_WHICH_CLK whichClkOSsource,
    TEGRASOC_WHICH_CLK *pWhichClkOSparent
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct clk *parent;
    const char *parentClkName;
    NvU32 i;

    if (nvl->disp_clk_handles.clk[whichClkOSsource].handles == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: invalid source clock requested\n");
        return NV_ERR_OBJECT_NOT_FOUND;
    }

    parent = clk_get_parent(nvl->disp_clk_handles.clk[whichClkOSsource].handles);

    /*
     * Fixes: (1) removed a stray ";;" and an unused "status" local;
     * (2) clk_get_parent() can return NULL for an orphan/root clock as
     * well as an ERR_PTR on failure — the old IS_ERR()-only check let a
     * NULL through to strcmp() below.
     */
    if (IS_ERR_OR_NULL(parent))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: clk_get_parent failed with error: %ld\n",
                  PTR_ERR(parent));
        return NV_ERR_INVALID_POINTER;
    }

    parentClkName = __clk_get_name(parent);

    //
    // TEGRASOC_WHICH_CLK_MAX is maximum clock defined in below enum
    // arch/nvalloc/unix/include/nv.h
    // enum TEGRASOC_WHICH_CLK
    //
    for (i = 0U; i < TEGRASOC_WHICH_CLK_MAX; i++)
    {
        /* Entries never populated by nv_clk_get_handles() have no name;
         * skip them rather than passing NULL to strcmp(). */
        if ((nvl->disp_clk_handles.clk[i].clkName != NULL) &&
            (strcmp(nvl->disp_clk_handles.clk[i].clkName, parentClkName) == 0))
        {
            *pWhichClkOSparent = i;
            return NV_OK;
        }
    }

    nv_printf(NV_DBG_ERRORS, "NVRM: unexpected parent clock ref addr: %p\n", parent);
    return NV_ERR_INVALID_OBJECT_PARENT;
}

View File

@@ -0,0 +1,217 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(CONFIG_CRAY_XT)
enum {
NV_FORMAT_STATE_ORDINARY,
NV_FORMAT_STATE_INTRODUCTION,
NV_FORMAT_STATE_FLAGS,
NV_FORMAT_STATE_FIELD_WIDTH,
NV_FORMAT_STATE_PRECISION,
NV_FORMAT_STATE_LENGTH_MODIFIER,
NV_FORMAT_STATE_CONVERSION_SPECIFIER
};
enum {
NV_LENGTH_MODIFIER_NONE,
NV_LENGTH_MODIFIER_CHAR,
NV_LENGTH_MODIFIER_SHORT_INT,
NV_LENGTH_MODIFIER_LONG_INT,
NV_LENGTH_MODIFIER_LONG_LONG_INT
};
#define NV_IS_FLAG(c) \
((c) == '#' || (c) == '0' || (c) == '-' || (c) == ' ' || (c) == '+')
#define NV_IS_LENGTH_MODIFIER(c) \
((c) == 'h' || (c) == 'l' || (c) == 'L' || (c) == 'q' || (c) == 'j' || \
(c) == 'z' || (c) == 't')
#define NV_IS_CONVERSION_SPECIFIER(c) \
((c) == 'd' || (c) == 'i' || (c) == 'o' || (c) == 'u' || (c) == 'x' || \
(c) == 'X' || (c) == 'e' || (c) == 'E' || (c) == 'f' || (c) == 'F' || \
(c) == 'g' || (c) == 'G' || (c) == 'a' || (c) == 'A' || (c) == 'c' || \
(c) == 's' || (c) == 'p')
#define NV_MAX_NUM_INFO_MMRS 6
/*
 * Scan a printf-style format string, pull the corresponding integer
 * arguments from 'ap', pack them into up to NV_MAX_NUM_INFO_MMRS 64-bit
 * "info MMR" words (two 32-bit values per word), and forward the error to
 * the Cray XT reporting hook.
 *
 * Returns NV_OK, NV_ERR_INVALID_ARGUMENT for unsupported conversions, or
 * NV_ERR_INSUFFICIENT_RESOURCES if more data arrives than the MMR words
 * can hold.
 *
 * Fix: "case 'q'" in the length-modifier switch previously fell through
 * into "default: return NV_ERR_INVALID_ARGUMENT", so every "%q..." format
 * was rejected; a break has been added.
 */
NV_STATUS nvos_forward_error_to_cray(
    struct pci_dev *dev,
    NvU32 error_number,
    const char *format,
    va_list ap
)
{
    NvU32 num_info_mmrs;
    NvU64 x = 0, info_mmrs[NV_MAX_NUM_INFO_MMRS];
    int state = NV_FORMAT_STATE_ORDINARY;
    int modifier = NV_LENGTH_MODIFIER_NONE;
    NvU32 i, n = 0, m = 0;   /* n: 32-bit halves in the current word; m: word index */

    memset(info_mmrs, 0, sizeof(info_mmrs));

    while (*format != '\0')
    {
        /*
         * First switch: advance the parser.  Cases that do not match the
         * current character deliberately fall through to try the next
         * grammar component (%[flags][width][.precision][length]spec).
         */
        switch (state)
        {
            case NV_FORMAT_STATE_ORDINARY:
                if (*format == '%')
                    state = NV_FORMAT_STATE_INTRODUCTION;
                break;
            case NV_FORMAT_STATE_INTRODUCTION:
                if (*format == '%')
                {
                    /* "%%" is a literal percent, not a conversion. */
                    state = NV_FORMAT_STATE_ORDINARY;
                    break;
                }
                /* fallthrough */
            case NV_FORMAT_STATE_FLAGS:
                if (NV_IS_FLAG(*format))
                {
                    state = NV_FORMAT_STATE_FLAGS;
                    break;
                }
                else if (*format == '*')
                {
                    state = NV_FORMAT_STATE_FIELD_WIDTH;
                    break;
                }
                /* fallthrough */
            case NV_FORMAT_STATE_FIELD_WIDTH:
                if ((*format >= '0') && (*format <= '9'))
                {
                    state = NV_FORMAT_STATE_FIELD_WIDTH;
                    break;
                }
                else if (*format == '.')
                {
                    state = NV_FORMAT_STATE_PRECISION;
                    break;
                }
                /* fallthrough */
            case NV_FORMAT_STATE_PRECISION:
                if ((*format >= '0') && (*format <= '9'))
                {
                    state = NV_FORMAT_STATE_PRECISION;
                    break;
                }
                else if (NV_IS_LENGTH_MODIFIER(*format))
                {
                    state = NV_FORMAT_STATE_LENGTH_MODIFIER;
                    break;
                }
                else if (NV_IS_CONVERSION_SPECIFIER(*format))
                {
                    state = NV_FORMAT_STATE_CONVERSION_SPECIFIER;
                    break;
                }
                /* fallthrough */
            case NV_FORMAT_STATE_LENGTH_MODIFIER:
                if ((*format == 'h') || (*format == 'l'))
                {
                    state = NV_FORMAT_STATE_LENGTH_MODIFIER;
                    break;
                }
                else if (NV_IS_CONVERSION_SPECIFIER(*format))
                {
                    state = NV_FORMAT_STATE_CONVERSION_SPECIFIER;
                    break;
                }
        }
        /*
         * Second switch: act on the state just entered — track length
         * modifiers and harvest arguments at each conversion specifier.
         */
        switch (state)
        {
            case NV_FORMAT_STATE_INTRODUCTION:
                modifier = NV_LENGTH_MODIFIER_NONE;
                break;
            case NV_FORMAT_STATE_LENGTH_MODIFIER:
                switch (*format)
                {
                    case 'h':
                        /* "h" -> short, "hh" -> char. */
                        modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
                            ? NV_LENGTH_MODIFIER_SHORT_INT
                            : NV_LENGTH_MODIFIER_CHAR;
                        break;
                    case 'l':
                        /* "l" -> long, "ll" -> long long. */
                        modifier = (modifier == NV_LENGTH_MODIFIER_NONE)
                            ? NV_LENGTH_MODIFIER_LONG_INT
                            : NV_LENGTH_MODIFIER_LONG_LONG_INT;
                        break;
                    case 'q':
                        /* BSD-style "%q" is equivalent to "ll". */
                        modifier = NV_LENGTH_MODIFIER_LONG_LONG_INT;
                        break;  /* bug fix: previously fell into default */
                    default:
                        /* 'L', 'j', 'z', 't' are not supported here. */
                        return NV_ERR_INVALID_ARGUMENT;
                }
                break;
            case NV_FORMAT_STATE_CONVERSION_SPECIFIER:
                switch (*format)
                {
                    case 'c':
                    case 'd':
                    case 'i':
                        x = (unsigned int)va_arg(ap, int);
                        break;
                    case 'o':
                    case 'u':
                    case 'x':
                    case 'X':
                        switch (modifier)
                        {
                            case NV_LENGTH_MODIFIER_LONG_LONG_INT:
                                x = va_arg(ap, unsigned long long int);
                                break;
                            case NV_LENGTH_MODIFIER_LONG_INT:
                                x = va_arg(ap, unsigned long int);
                                break;
                            case NV_LENGTH_MODIFIER_CHAR:
                            case NV_LENGTH_MODIFIER_SHORT_INT:
                            case NV_LENGTH_MODIFIER_NONE:
                                /* Sub-int args are promoted to int by the
                                 * varargs machinery. */
                                x = va_arg(ap, unsigned int);
                                break;
                        }
                        break;
                    default:
                        /* Floating point, strings and pointers cannot be
                         * packed into the MMR words. */
                        return NV_ERR_INVALID_ARGUMENT;
                }
                state = NV_FORMAT_STATE_ORDINARY;
                /* Pack 32 bits at a time; long long contributes two. */
                for (i = 0; i < ((modifier == NV_LENGTH_MODIFIER_LONG_LONG_INT)
                                 ? 2 : 1); i++)
                {
                    if (m == NV_MAX_NUM_INFO_MMRS)
                        return NV_ERR_INSUFFICIENT_RESOURCES;
                    info_mmrs[m] = ((info_mmrs[m] << 32) | (x & 0xffffffff));
                    x >>= 32;
                    if (++n == 2)
                    {
                        m++;
                        n = 0;
                    }
                }
        }
        format++;
    }

    num_info_mmrs = (m + (n != 0));
    if (num_info_mmrs > 0)
        cray_nvidia_report_error(dev, error_number, num_info_mmrs, info_mmrs);

    return NV_OK;
}
#endif

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,896 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/dma-buf.h>
#include "nv-dmabuf.h"
#if defined(CONFIG_DMA_SHARED_BUFFER)
/* One exported memory object: its duped RM handle plus bookkeeping.
 * NOTE(review): offset/bar1_va semantics inferred from field names —
 * confirm against the dup/undup paths further down the file. */
typedef struct nv_dma_buf_mem_handle
{
    NvHandle h_memory;
    NvU64 offset;
    NvU64 size;
    NvU64 bar1_va;
} nv_dma_buf_mem_handle_t;
/* Per-dma-buf private state: RM client/device handles, the handle array
 * (total_objects entries, num_objects currently duped — see
 * nv_dma_buf_undup_mem_handles_unlocked()), and a lock guarding it. */
typedef struct nv_dma_buf_file_private
{
    nv_state_t *nv;
    NvHandle h_client;
    NvHandle h_device;
    NvHandle h_subdevice;
    NvU32 total_objects;
    NvU32 num_objects;
    NvU64 total_size;
    NvU64 attached_size;
    struct mutex lock;
    nv_dma_buf_mem_handle_t *handles;
    NvU64 bar1_va_ref_count;
    void *mig_info;
} nv_dma_buf_file_private_t;
/* Free a dma-buf private struct and its handle array (NULL-safe). */
static void
nv_dma_buf_free_file_private(
    nv_dma_buf_file_private_t *priv
)
{
    nv_dma_buf_mem_handle_t *handles;

    if (priv == NULL)
    {
        return;
    }

    handles = priv->handles;
    if (handles != NULL)
    {
        priv->handles = NULL;
        NV_KFREE(handles, priv->total_objects * sizeof(handles[0]));
    }

    mutex_destroy(&priv->lock);
    NV_KFREE(priv, sizeof(nv_dma_buf_file_private_t));
}
//
// Allocate and zero a dma-buf private block with room for num_handles
// handle slots. Returns NULL on allocation failure.
//
static nv_dma_buf_file_private_t*
nv_dma_buf_alloc_file_private(
    NvU32 num_handles
)
{
    nv_dma_buf_file_private_t *priv = NULL;

    NV_KMALLOC(priv, sizeof(nv_dma_buf_file_private_t));
    if (priv == NULL)
    {
        return NULL;
    }
    memset(priv, 0, sizeof(nv_dma_buf_file_private_t));

    mutex_init(&priv->lock);

    NV_KMALLOC(priv->handles, num_handles * sizeof(priv->handles[0]));
    if (priv->handles == NULL)
    {
        // Handle array allocation failed: tear down what we built so far.
        nv_dma_buf_free_file_private(priv);
        return NULL;
    }
    memset(priv->handles, 0, num_handles * sizeof(priv->handles[0]));

    return priv;
}
//
// Release (un-dup) a run of attached memory handles.
//
// `index` is the first slot to release and `num_objects` is the COUNT of
// slots to release starting there. Already-empty slots are skipped.
//
// BUGFIX: the loop previously ran `for (i = index; i < num_objects; i++)`,
// treating num_objects as an END index. The dup-failure path calls this as
// (params->index, count) — start slot plus a count — so whenever
// params->index > 0 and count <= index the loop body never executed and the
// freshly dup'd handles leaked. The bound is now `index + num_objects`,
// which also keeps the existing (0, priv->num_objects) callers correct.
//
// Must be called with RMAPI lock and GPU lock taken.
//
static void
nv_dma_buf_undup_mem_handles_unlocked(
    nvidia_stack_t *sp,
    NvU32 index,
    NvU32 num_objects,
    nv_dma_buf_file_private_t *priv
)
{
    NvU32 i = 0;

    for (i = index; i < (index + num_objects); i++)
    {
        if (priv->handles[i].h_memory == 0)
        {
            continue;
        }

        rm_dma_buf_undup_mem_handle(sp, priv->nv, priv->h_client,
                                    priv->handles[i].h_memory);

        priv->attached_size -= priv->handles[i].size;

        priv->handles[i].h_memory = 0;
        priv->handles[i].offset = 0;
        priv->handles[i].size = 0;
        priv->num_objects--;
    }
}
//
// Locked wrapper around nv_dma_buf_undup_mem_handles_unlocked(): takes the
// RMAPI lock and all-GPUs lock, releases the handles, then drops the locks.
// Lock-acquisition failures are unexpected here, hence the WARN_ONs.
//
static void
nv_dma_buf_undup_mem_handles(
    nvidia_stack_t *sp,
    NvU32 index,
    NvU32 num_objects,
    nv_dma_buf_file_private_t *priv
)
{
    if (WARN_ON(rm_acquire_api_lock(sp) != NV_OK))
    {
        return;
    }

    if (!WARN_ON(rm_acquire_all_gpus_lock(sp) != NV_OK))
    {
        nv_dma_buf_undup_mem_handles_unlocked(sp, index, num_objects, priv);
        rm_release_all_gpus_lock(sp);
    }

    rm_release_api_lock(sp);
}
//
// Dup the caller's memory handles into the dma-buf's own RM client, filling
// priv->handles starting at params->index. On any failure, every handle
// dup'd by THIS call is rolled back; previously attached handles are kept.
// Caller must hold priv->lock.
//
static NV_STATUS
nv_dma_buf_dup_mem_handles(
    nvidia_stack_t *sp,
    nv_dma_buf_file_private_t *priv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
    NV_STATUS status = NV_OK;
    NvU32 index = params->index;   // next slot to fill
    NvU32 count = 0;               // handles dup'd by this call (for rollback)
    NvU32 i = 0;

    status = rm_acquire_api_lock(sp);
    if (status != NV_OK)
    {
        return status;
    }

    status = rm_acquire_gpu_lock(sp, priv->nv);
    if (status != NV_OK)
    {
        goto unlock_api_lock;
    }

    for (i = 0; i < params->numObjects; i++)
    {
        NvHandle h_memory_duped = 0;

        // Refuse to overwrite a slot that is already populated.
        if (priv->handles[index].h_memory != 0)
        {
            status = NV_ERR_IN_USE;
            goto failed;
        }

        // Reject objects that would push the attached total past totalSize.
        // (Written as a subtraction to avoid overflowing the addition.)
        if (params->sizes[i] > priv->total_size - priv->attached_size)
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto failed;
        }

        status = rm_dma_buf_dup_mem_handle(sp, priv->nv,
                                           params->hClient,
                                           priv->h_client,
                                           priv->h_device,
                                           priv->h_subdevice,
                                           priv->mig_info,
                                           params->handles[i],
                                           params->offsets[i],
                                           params->sizes[i],
                                           &h_memory_duped);
        if (status != NV_OK)
        {
            goto failed;
        }

        priv->attached_size += params->sizes[i];
        priv->handles[index].h_memory = h_memory_duped;
        priv->handles[index].offset = params->offsets[i];
        priv->handles[index].size = params->sizes[i];
        priv->num_objects++;
        index++;
        count++;
    }

    // Once the final object is attached, the per-object sizes must add up
    // to exactly the totalSize declared at creation time.
    if ((priv->num_objects == priv->total_objects) &&
        (priv->attached_size != priv->total_size))
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto failed;
    }

    rm_release_gpu_lock(sp, priv->nv);
    rm_release_api_lock(sp);

    return NV_OK;

failed:
    // Roll back only the `count` handles dup'd by this call.
    nv_dma_buf_undup_mem_handles_unlocked(sp, params->index, count, priv);

    rm_release_gpu_lock(sp, priv->nv);

unlock_api_lock:
    rm_release_api_lock(sp);

    return status;
}
//
// Undo the DMA mappings for the first `count` scatterlist entries and, when
// this is the last outstanding map (bar1_va_ref_count already dropped to 0
// by the caller), also release the underlying BAR1 mappings.
//
// Must be called with RMAPI lock and GPU lock taken, and priv->lock held.
//
static void
nv_dma_buf_unmap_unlocked(
    nvidia_stack_t *sp,
    nv_dma_device_t *peer_dma_dev,
    nv_dma_buf_file_private_t *priv,
    struct sg_table *sgt,
    NvU32 count
)
{
    NV_STATUS status;
    NvU32 i;
    NvU64 dma_len;
    NvU64 dma_addr;
    NvU64 bar1_va;
    NvBool bar1_unmap_needed;
    struct scatterlist *sg = NULL;

    // Only the last unmapper tears down the shared BAR1 mappings.
    bar1_unmap_needed = (priv->bar1_va_ref_count == 0);

    for_each_sg(sgt->sgl, sg, count, i)
    {
        dma_addr = sg_dma_address(sg);
        dma_len  = priv->handles[i].size;
        bar1_va  = priv->handles[i].bar1_va;

        // Each sg entry was built 1:1 from a handle; a mismatch means the
        // table was corrupted after nv_dma_buf_map() built it.
        WARN_ON(sg_dma_len(sg) != priv->handles[i].size);

        nv_dma_unmap_peer(peer_dma_dev, (dma_len / os_page_size), dma_addr);

        if (bar1_unmap_needed)
        {
            status = rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client,
                                                 priv->handles[i].h_memory,
                                                 priv->handles[i].size,
                                                 priv->handles[i].bar1_va);
            WARN_ON(status != NV_OK);
        }
    }
}
static struct sg_table*
nv_dma_buf_map(
struct dma_buf_attachment *attachment,
enum dma_data_direction direction
)
{
NV_STATUS status;
nvidia_stack_t *sp = NULL;
struct scatterlist *sg = NULL;
struct sg_table *sgt = NULL;
struct dma_buf *buf = attachment->dmabuf;
struct device *dev = attachment->dev;
nv_dma_buf_file_private_t *priv = buf->priv;
nv_dma_device_t peer_dma_dev = {{ 0 }};
NvBool bar1_map_needed;
NvBool bar1_unmap_needed;
NvU32 count = 0;
NvU32 i = 0;
int rc = 0;
//
// We support importers that are able to handle MMIO resources
// not backed by struct page. This will need to be revisited
// when dma-buf support for P9 will be added.
//
#if defined(NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT) && \
defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER)
if (dma_buf_attachment_is_dynamic(attachment) &&
!attachment->peer2peer)
{
nv_printf(NV_DBG_ERRORS,
"NVRM: failed to map dynamic attachment with no P2P support\n");
return NULL;
}
#endif
mutex_lock(&priv->lock);
if (priv->num_objects != priv->total_objects)
{
goto unlock_priv;
}
rc = nv_kmem_cache_alloc_stack(&sp);
if (rc != 0)
{
goto unlock_priv;
}
status = rm_acquire_api_lock(sp);
if (status != NV_OK)
{
goto free_sp;
}
status = rm_acquire_gpu_lock(sp, priv->nv);
if (status != NV_OK)
{
goto unlock_api_lock;
}
NV_KMALLOC(sgt, sizeof(struct sg_table));
if (sgt == NULL)
{
goto unlock_gpu_lock;
}
memset(sgt, 0, sizeof(struct sg_table));
//
// RM currently returns contiguous BAR1, so we create as many
// sg entries as the number of handles being mapped.
// When RM can alloc discontiguous BAR1, this code will need to be revisited.
//
rc = sg_alloc_table(sgt, priv->num_objects, GFP_KERNEL);
if (rc != 0)
{
goto free_sgt;
}
peer_dma_dev.dev = dev;
peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask;
bar1_map_needed = bar1_unmap_needed = (priv->bar1_va_ref_count == 0);
for_each_sg(sgt->sgl, sg, priv->num_objects, i)
{
NvU64 dma_addr;
NvU64 dma_len;
if (bar1_map_needed)
{
status = rm_dma_buf_map_mem_handle(sp, priv->nv, priv->h_client,
priv->handles[i].h_memory,
priv->handles[i].offset,
priv->handles[i].size,
&priv->handles[i].bar1_va);
if (status != NV_OK)
{
goto unmap_handles;
}
}
dma_addr = priv->handles[i].bar1_va;
dma_len = priv->handles[i].size;
status = nv_dma_map_peer(&peer_dma_dev, priv->nv->dma_dev,
0x1, (dma_len / os_page_size), &dma_addr);
if (status != NV_OK)
{
if (bar1_unmap_needed)
{
// Unmap the recently mapped memory handle
(void) rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client,
priv->handles[i].h_memory,
priv->handles[i].size,
priv->handles[i].bar1_va);
}
// Unmap remaining memory handles
goto unmap_handles;
}
sg_set_page(sg, NULL, dma_len, 0);
sg_dma_address(sg) = (dma_addr_t)dma_addr;
sg_dma_len(sg) = dma_len;
count++;
}
priv->bar1_va_ref_count++;
rm_release_gpu_lock(sp, priv->nv);
rm_release_api_lock(sp);
nv_kmem_cache_free_stack(sp);
mutex_unlock(&priv->lock);
return sgt;
unmap_handles:
nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, count);
sg_free_table(sgt);
free_sgt:
NV_KFREE(sgt, sizeof(struct sg_table));
unlock_gpu_lock:
rm_release_gpu_lock(sp, priv->nv);
unlock_api_lock:
rm_release_api_lock(sp);
free_sp:
nv_kmem_cache_free_stack(sp);
unlock_priv:
mutex_unlock(&priv->lock);
return NULL;
}
static void
nv_dma_buf_unmap(
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction
)
{
NV_STATUS status;
struct dma_buf *buf = attachment->dmabuf;
struct device *dev = attachment->dev;
nvidia_stack_t *sp = NULL;
nv_dma_buf_file_private_t *priv = buf->priv;
nv_dma_device_t peer_dma_dev = {{ 0 }};
int rc = 0;
mutex_lock(&priv->lock);
if (priv->num_objects != priv->total_objects)
{
goto unlock_priv;
}
rc = nv_kmem_cache_alloc_stack(&sp);
if (WARN_ON(rc != 0))
{
goto unlock_priv;
}
status = rm_acquire_api_lock(sp);
if (WARN_ON(status != NV_OK))
{
goto free_sp;
}
status = rm_acquire_gpu_lock(sp, priv->nv);
if (WARN_ON(status != NV_OK))
{
goto unlock_api_lock;
}
peer_dma_dev.dev = dev;
peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask;
priv->bar1_va_ref_count--;
nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, priv->num_objects);
sg_free_table(sgt);
NV_KFREE(sgt, sizeof(struct sg_table));
rm_release_gpu_lock(sp, priv->nv);
unlock_api_lock:
rm_release_api_lock(sp);
free_sp:
nv_kmem_cache_free_stack(sp);
unlock_priv:
mutex_unlock(&priv->lock);
}
//
// dma_buf_ops.release callback: runs when the dma-buf's last reference is
// dropped. Releases every dup'd handle, the RM client/device pair, the
// private block, and finally the GPU reference taken at creation time.
//
static void
nv_dma_buf_release(
    struct dma_buf *buf
)
{
    nv_dma_buf_file_private_t *priv = buf->priv;
    nvidia_stack_t *sp = NULL;
    nv_state_t *nv;

    if (priv == NULL)
    {
        return;
    }

    nv = priv->nv;

    if (WARN_ON(nv_kmem_cache_alloc_stack(&sp) != 0))
    {
        return;
    }

    nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv);

    rm_dma_buf_put_client_and_device(sp, nv, priv->h_client, priv->h_device,
                                     priv->h_subdevice, priv->mig_info);

    nv_dma_buf_free_file_private(priv);
    buf->priv = NULL;

    // Drop the GPU reference last: `nv` must stay valid for the calls above.
    nvidia_dev_put(nv->gpu_id, sp);

    nv_kmem_cache_free_stack(sp);
}
//
// dma_buf_ops.mmap callback: CPU mappings of the exported memory are not
// supported; importers must use the DMA path.
//
static int
nv_dma_buf_mmap(
    struct dma_buf *buf,
    struct vm_area_struct *vma
)
{
    return -ENOTSUPP;
}
#if defined(NV_DMA_BUF_OPS_HAS_KMAP) || \
    defined(NV_DMA_BUF_OPS_HAS_MAP)
//
// kmap/kunmap (or map/unmap, depending on kernel version) stubs.
// Some kernels require these ops to be non-NULL for dma_buf_export() to
// succeed; CPU access to the exported memory is not actually supported,
// so the map stub always reports failure by returning NULL.
//
static void*
nv_dma_buf_kmap_stub(
    struct dma_buf *buf,
    unsigned long page_num
)
{
    return NULL;
}

static void
nv_dma_buf_kunmap_stub(
    struct dma_buf *buf,
    unsigned long page_num,
    void *addr
)
{
    return;
}
#endif
#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC) || \
    defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
//
// Atomic kmap/kunmap stubs, mirroring the non-atomic stubs above: present
// only to satisfy kernels that demand these ops; always fail the mapping.
//
static void*
nv_dma_buf_kmap_atomic_stub(
    struct dma_buf *buf,
    unsigned long page_num
)
{
    return NULL;
}

static void
nv_dma_buf_kunmap_atomic_stub(
    struct dma_buf *buf,
    unsigned long page_num,
    void *addr
)
{
    return;
}
#endif
//
// Note: Some of the dma-buf operations are mandatory in some kernels.
// So stubs are added to prevent dma_buf_export() failure.
// The actual implementations of these interfaces is not really required
// for the export operation to work.
//
// Same functions are used for kmap*/map* because of this commit:
// f9b67f0014cb: dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic
//
static const struct dma_buf_ops nv_dma_buf_ops = {
    .map_dma_buf   = nv_dma_buf_map,
    .unmap_dma_buf = nv_dma_buf_unmap,
    .release       = nv_dma_buf_release,
    .mmap          = nv_dma_buf_mmap,
    // The remaining ops are version-dependent stubs (see comments above).
#if defined(NV_DMA_BUF_OPS_HAS_KMAP)
    .kmap          = nv_dma_buf_kmap_stub,
    .kunmap        = nv_dma_buf_kunmap_stub,
#endif
#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC)
    .kmap_atomic   = nv_dma_buf_kmap_atomic_stub,
    .kunmap_atomic = nv_dma_buf_kunmap_atomic_stub,
#endif
#if defined(NV_DMA_BUF_OPS_HAS_MAP)
    .map           = nv_dma_buf_kmap_stub,
    .unmap         = nv_dma_buf_kunmap_stub,
#endif
#if defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
    .map_atomic    = nv_dma_buf_kmap_atomic_stub,
    .unmap_atomic  = nv_dma_buf_kunmap_atomic_stub,
#endif
};
//
// Create a new dma-buf for this GPU's memory: allocate the private block,
// take a GPU reference, create an RM client/device, dup the first batch of
// handles, export the dma-buf, and install an fd for it in params->fd.
//
// Ownership note: once dma_buf_export() succeeds, the dma-buf owns `priv`
// and all dup'd state — from then on, cleanup happens only through the
// release callback (triggered here via dma_buf_put() on the fd-install
// failure path).
//
static NV_STATUS
nv_dma_buf_create(
    nv_state_t *nv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
    int rc = 0;
    NV_STATUS status;
    nvidia_stack_t *sp = NULL;
    struct dma_buf *buf = NULL;
    nv_dma_buf_file_private_t *priv = NULL;
    NvU32 gpu_id = nv->gpu_id;

    if (!nv->dma_buf_supported)
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    // numObjects <= totalObjects was validated by nv_dma_buf_export(),
    // so this subtraction cannot underflow.
    if (params->index > (params->totalObjects - params->numObjects))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    priv = nv_dma_buf_alloc_file_private(params->totalObjects);
    if (priv == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate dma-buf private\n");
        return NV_ERR_NO_MEMORY;
    }

    priv->total_objects = params->totalObjects;
    priv->total_size    = params->totalSize;
    priv->nv            = nv;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        status = NV_ERR_NO_MEMORY;
        goto cleanup_priv;
    }

    // Hold a GPU reference for the dma-buf's lifetime; dropped either on
    // the error paths below or by nv_dma_buf_release().
    rc = nvidia_dev_get(gpu_id, sp);
    if (rc != 0)
    {
        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_sp;
    }

    status = rm_dma_buf_get_client_and_device(sp, priv->nv,
                                              params->hClient,
                                              &priv->h_client,
                                              &priv->h_device,
                                              &priv->h_subdevice,
                                              &priv->mig_info);
    if (status != NV_OK)
    {
        goto cleanup_device;
    }

    status = nv_dma_buf_dup_mem_handles(sp, priv, params);
    if (status != NV_OK)
    {
        goto cleanup_client_and_device;
    }

    // dma_buf_export()'s signature varies across kernel versions; conftest
    // selects the matching call form.
#if (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 1)
    {
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops   = &nv_dma_buf_ops;
        exp_info.size  = params->totalSize;
        exp_info.flags = O_RDWR | O_CLOEXEC;
        exp_info.priv  = priv;

        buf = dma_buf_export(&exp_info);
    }
#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 4)
    buf = dma_buf_export(priv, &nv_dma_buf_ops,
                         params->totalSize, O_RDWR | O_CLOEXEC);
#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 5)
    buf = dma_buf_export(priv, &nv_dma_buf_ops,
                         params->totalSize, O_RDWR | O_CLOEXEC, NULL);
#endif

    if (IS_ERR(buf))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to create dma-buf\n");

        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_handles;
    }

    nv_kmem_cache_free_stack(sp);

    rc = dma_buf_fd(buf, O_RDWR | O_CLOEXEC);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf file descriptor\n");

        //
        // If dma-buf is successfully created, the dup'd handles
        // clean-up should be done by the release callback.
        //
        dma_buf_put(buf);

        return NV_ERR_OPERATING_SYSTEM;
    }

    params->fd = rc;

    return NV_OK;

cleanup_handles:
    nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv);

cleanup_client_and_device:
    rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device,
                                     priv->h_subdevice, priv->mig_info);

cleanup_device:
    nvidia_dev_put(gpu_id, sp);

cleanup_sp:
    nv_kmem_cache_free_stack(sp);

cleanup_priv:
    nv_dma_buf_free_file_private(priv);

    return status;
}
//
// Attach additional memory handles to an already-exported dma-buf,
// identified by params->fd.
//
// BUGFIXES:
//  1. Verify the fd actually refers to a dma-buf exported by this driver
//     (buf->ops == &nv_dma_buf_ops) before trusting buf->priv. Previously
//     any dma-buf with a non-NULL priv — e.g. one exported by another
//     driver — would have its private data reinterpreted as ours.
//  2. Guard the NvU32 range check against underflow: if params->numObjects
//     exceeded priv->total_objects, (total_objects - numObjects) wrapped to
//     a huge value, the check always passed, and the dup loop could write
//     past the end of priv->handles[].
//
static NV_STATUS
nv_dma_buf_reuse(
    nv_state_t *nv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
    int rc = 0;
    NV_STATUS status = NV_OK;
    nvidia_stack_t *sp = NULL;
    struct dma_buf *buf = NULL;
    nv_dma_buf_file_private_t *priv = NULL;

    buf = dma_buf_get(params->fd);
    if (IS_ERR(buf))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf\n");
        return NV_ERR_OPERATING_SYSTEM;
    }

    // Reject dma-bufs that were not exported by this driver.
    if (buf->ops != &nv_dma_buf_ops)
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto cleanup_dmabuf;
    }

    priv = buf->priv;
    if (priv == NULL)
    {
        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_dmabuf;
    }

    rc = mutex_lock_interruptible(&priv->lock);
    if (rc != 0)
    {
        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_dmabuf;
    }

    // Underflow-safe range check: the new batch must fit inside the handle
    // array declared when the dma-buf was created.
    if ((params->numObjects > priv->total_objects) ||
        (params->index > (priv->total_objects - params->numObjects)))
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto unlock_priv;
    }

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        status = NV_ERR_NO_MEMORY;
        goto unlock_priv;
    }

    status = nv_dma_buf_dup_mem_handles(sp, priv, params);

    nv_kmem_cache_free_stack(sp);

unlock_priv:
    mutex_unlock(&priv->lock);

cleanup_dmabuf:
    dma_buf_put(buf);

    return status;
}
#endif // CONFIG_DMA_SHARED_BUFFER
//
// ioctl entry point: export GPU memory as a dma-buf fd.
//
// params->fd == -1 requests creation of a new dma-buf; params->fd >= 0
// attaches additional handles to an existing one. Returns
// NV_ERR_NOT_SUPPORTED when the kernel lacks CONFIG_DMA_SHARED_BUFFER.
//
NV_STATUS
nv_dma_buf_export(
    nv_state_t *nv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
#if defined(CONFIG_DMA_SHARED_BUFFER)
    // Reject malformed requests up front: zero sizes/counts, batches that
    // exceed the per-call limit, or more objects than declared in total.
    if ((params == NULL) ||
        (params->totalSize == 0) ||
        (params->numObjects == 0) ||
        (params->totalObjects == 0) ||
        (params->numObjects > NV_DMABUF_EXPORT_MAX_HANDLES) ||
        (params->numObjects > params->totalObjects))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    if (params->fd == -1)
    {
        return nv_dma_buf_create(nv, params);
    }

    if (params->fd >= 0)
    {
        return nv_dma_buf_reuse(nv, params);
    }

    return NV_ERR_INVALID_ARGUMENT;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif // CONFIG_DMA_SHARED_BUFFER
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,412 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-frontend.h"
// Standard kernel module metadata. Each macro is guarded because very old
// kernels may not define all of them.
#if defined(MODULE_LICENSE)
MODULE_LICENSE("Dual MIT/GPL");
#endif
#if defined(MODULE_INFO)
MODULE_INFO(supported, "external");
#endif
#if defined(MODULE_VERSION)
MODULE_VERSION(NV_VERSION_STRING);
#endif
#ifdef MODULE_ALIAS_CHARDEV_MAJOR
MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
#endif

/*
 * MODULE_IMPORT_NS() is added by commit id 8651ec01daeda
 * ("module: add support for symbol namespaces") in 5.4
 */
#if defined(MODULE_IMPORT_NS)
/*
 * DMA_BUF namespace is added by commit id 16b0314aa746
 * ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16
 */
MODULE_IMPORT_NS(DMA_BUF);
#endif
// Number of nvidia module instances currently registered with the frontend.
static NvU32 nv_num_instances;

// Lock required to protect the minor-number table below.
struct semaphore nv_module_table_lock;

// Minor-number table: maps each device minor to its owning module.
nvidia_module_t *nv_minor_num_table[NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX + 1];

int nvidia_init_module(void);
void nvidia_exit_module(void);

/* EXPORTS to Linux Kernel */

int nvidia_frontend_open(struct inode *, struct file *);
int nvidia_frontend_close(struct inode *, struct file *);
unsigned int nvidia_frontend_poll(struct file *, poll_table *);
int nvidia_frontend_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
long nvidia_frontend_unlocked_ioctl(struct file *, unsigned int, unsigned long);
long nvidia_frontend_compat_ioctl(struct file *, unsigned int, unsigned long);
int nvidia_frontend_mmap(struct file *, struct vm_area_struct *);
/* character driver entry points */
static struct file_operations nv_frontend_fops = {
    .owner     = THIS_MODULE,
    .poll      = nvidia_frontend_poll,
#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
    // Legacy BKL-style ioctl entry point, only on kernels that still have it.
    .ioctl     = nvidia_frontend_ioctl,
#endif
    .unlocked_ioctl = nvidia_frontend_unlocked_ioctl,
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
    // 32-bit compat path only matters on 64-bit architectures.
    .compat_ioctl = nvidia_frontend_compat_ioctl,
#endif
    .mmap      = nvidia_frontend_mmap,
    .open      = nvidia_frontend_open,
    .release   = nvidia_frontend_close,
};
/* Helper functions */

//
// Assign a free minor number to `device` (and, when `all` is NV_TRUE, to
// each device on its ->next list in turn) and record the owning module in
// the minor-number table. Returns 0 on success, -1 if the table ran out of
// free entries. Caller must hold nv_module_table_lock.
//
// NOTE(review): in the `all` case, minors assigned before the table fills
// up are NOT rolled back on failure — confirm callers tolerate this.
//
static int add_device(nvidia_module_t *module, nv_linux_state_t *device, NvBool all)
{
    NvU32 i;
    int rc = -1;

    // Look for a free minor number and assign a unique minor number to this device
    for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++)
    {
        if (nv_minor_num_table[i] == NULL)
        {
            nv_minor_num_table[i] = module;
            device->minor_num = i;
            if (all == NV_TRUE)
            {
                // Advance to the next device; done when the list is exhausted.
                device = device->next;
                if (device == NULL)
                {
                    rc = 0;
                    break;
                }
            }
            else
            {
                rc = 0;
                break;
            }
        }
    }

    return rc;
}
//
// Clear `device`'s entry in the minor-number table and reset its minor.
// Returns 0 on success, -1 if the device or its table entry is absent.
// Caller must hold nv_module_table_lock.
//
static int remove_device(nvidia_module_t *module, nv_linux_state_t *device)
{
    if ((device == NULL) || (nv_minor_num_table[device->minor_num] == NULL))
    {
        return -1;
    }

    nv_minor_num_table[device->minor_num] = NULL;
    device->minor_num = 0;
    return 0;
}
/* Export functions */

//
// Register an nvidia module instance with the frontend: reserve its control
// minor number (MAX - instance) and bump the instance count.
// Returns 0 on success, -EINVAL for an out-of-range instance number.
//
int nvidia_register_module(nvidia_module_t *module)
{
    int rc = 0;
    NvU32 ctrl_minor_num;

    down(&nv_module_table_lock);

    if (module->instance >= NV_MAX_MODULE_INSTANCES)
    {
        printk("NVRM: NVIDIA module instance %d registration failed.\n",
               module->instance);
        rc = -EINVAL;
    }
    else
    {
        ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
        nv_minor_num_table[ctrl_minor_num] = module;
        nv_num_instances++;
    }

    up(&nv_module_table_lock);

    return rc;
}
EXPORT_SYMBOL(nvidia_register_module);
//
// Undo nvidia_register_module(): release the instance's control minor and
// drop the instance count. Returns 0 on success, -1 if the instance was
// never registered.
//
int nvidia_unregister_module(nvidia_module_t *module)
{
    int rc = 0;
    NvU32 ctrl_minor_num;

    down(&nv_module_table_lock);

    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
    if (nv_minor_num_table[ctrl_minor_num] != NULL)
    {
        nv_minor_num_table[ctrl_minor_num] = NULL;
        nv_num_instances--;
    }
    else
    {
        printk("NVRM: NVIDIA module for %d instance does not exist\n",
               module->instance);
        rc = -1;
    }

    up(&nv_module_table_lock);

    return rc;
}
EXPORT_SYMBOL(nvidia_unregister_module);
//
// Assign a minor number to a single device belonging to a registered
// module. Fails with -1 if the module was never registered or the table
// is full.
//
int nvidia_frontend_add_device(nvidia_module_t *module, nv_linux_state_t * device)
{
    int rc = -1;
    NvU32 ctrl_minor_num;

    down(&nv_module_table_lock);

    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
    if (nv_minor_num_table[ctrl_minor_num] != NULL)
    {
        rc = add_device(module, device, NV_FALSE);
    }
    else
    {
        printk("NVRM: NVIDIA module for %d instance does not exist\n",
               module->instance);
    }

    up(&nv_module_table_lock);

    return rc;
}
EXPORT_SYMBOL(nvidia_frontend_add_device);
//
// Release the minor number of a single device belonging to a registered
// module. Fails with -1 if the module was never registered or the device
// has no table entry.
//
int nvidia_frontend_remove_device(nvidia_module_t *module, nv_linux_state_t * device)
{
    int rc = 0;
    NvU32 ctrl_minor_num;

    down(&nv_module_table_lock);

    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
    if (nv_minor_num_table[ctrl_minor_num] != NULL)
    {
        rc = remove_device(module, device);
    }
    else
    {
        printk("NVRM: NVIDIA module for %d instance does not exist\n",
               module->instance);
        rc = -1;
    }

    up(&nv_module_table_lock);

    return rc;
}
EXPORT_SYMBOL(nvidia_frontend_remove_device);
//
// Frontend open(): route the open to the module that owns the file's minor
// number, pinning that module in memory for as long as the file stays open.
//
int nvidia_frontend_open(
    struct inode *inode,
    struct file *file
)
{
    int rc = -ENODEV;

    nvidia_module_t *module = NULL;

    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);

    // Hold the table lock across the lookup AND the open so the module
    // cannot unregister between the two.
    down(&nv_module_table_lock);
    module = nv_minor_num_table[minor_num];
    if ((module != NULL) && (module->open != NULL))
    {
        // Increment the reference count of module to ensure that module does
        // not get unloaded if its corresponding device file is open, for
        // example nvidiaN.ko should not get unloaded if /dev/nvidiaN is open.
        if (!try_module_get(module->owner))
        {
            up(&nv_module_table_lock);
            return -ENODEV;
        }

        rc = module->open(inode, file);
        // Back out the module reference if the module's open rejected us.
        if (rc < 0)
        {
            module_put(module->owner);
        }
    }
    up(&nv_module_table_lock);
    return rc;
}
//
// Frontend release(): route the close to the owning module and drop the
// module reference taken in nvidia_frontend_open().
//
// NOTE(review): the reference is only dropped when module->close is
// non-NULL, while open takes it whenever module->open is non-NULL — a
// module providing open but not close would leak a reference. Confirm all
// registered modules provide both.
//
int nvidia_frontend_close(
    struct inode *inode,
    struct file *file
)
{
    int rc = -ENODEV;
    nvidia_module_t *module = NULL;

    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);

    module = nv_minor_num_table[minor_num];
    if ((module != NULL) && (module->close != NULL))
    {
        rc = module->close(inode, file);

        // Decrement the reference count of module.
        module_put(module->owner);
    }

    return rc;
}
//
// Frontend poll(): forward to the owning module's poll handler, or report
// no events when the minor has no module or the module has no handler.
//
unsigned int nvidia_frontend_poll(
    struct file *file,
    poll_table *wait
)
{
    struct inode *inode = NV_FILE_INODE(file);
    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
    nvidia_module_t *module = nv_minor_num_table[minor_num];

    if ((module == NULL) || (module->poll == NULL))
    {
        return 0;
    }

    return module->poll(file, wait);
}
//
// Frontend ioctl dispatcher: forward to the owning module's ioctl handler.
// Returns -ENODEV when the minor has no module or the module lacks one.
//
int nvidia_frontend_ioctl(
    struct inode *inode,
    struct file *file,
    unsigned int cmd,
    unsigned long i_arg)
{
    nvidia_module_t *module = nv_minor_num_table[NV_FRONTEND_MINOR_NUMBER(inode)];

    if ((module == NULL) || (module->ioctl == NULL))
    {
        return -ENODEV;
    }

    return module->ioctl(inode, file, cmd, i_arg);
}
//
// unlocked_ioctl entry point: thin wrapper that recovers the inode from the
// file and delegates to the common dispatcher.
//
long nvidia_frontend_unlocked_ioctl(
    struct file *file,
    unsigned int cmd,
    unsigned long i_arg
)
{
    return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
//
// compat_ioctl entry point (32-bit callers on 64-bit kernels): identical to
// the unlocked path — the driver's ioctl ABI is compat-clean.
//
long nvidia_frontend_compat_ioctl(
    struct file *file,
    unsigned int cmd,
    unsigned long i_arg
)
{
    return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}
//
// Frontend mmap(): forward to the owning module's mmap handler.
// Returns -ENODEV when the minor has no module or the module lacks one.
//
int nvidia_frontend_mmap(
    struct file *file,
    struct vm_area_struct *vma
)
{
    struct inode *inode = NV_FILE_INODE(file);
    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
    nvidia_module_t *module = nv_minor_num_table[minor_num];

    if ((module == NULL) || (module->mmap == NULL))
    {
        return -ENODEV;
    }

    return module->mmap(file, vma);
}
//
// Module init: reset the minor-number table, initialize the core driver,
// then claim the character-device major. The core driver is torn down again
// if the chrdev registration fails.
//
static int __init nvidia_frontend_init_module(void)
{
    int status = 0;

    // Initialize the nvidia module table.
    nv_num_instances = 0;
    memset(nv_minor_num_table, 0, sizeof(nv_minor_num_table));
    NV_INIT_MUTEX(&nv_module_table_lock);

    status = nvidia_init_module();
    if (status < 0)
    {
        return status;
    }

    // register char device
    status = register_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend", &nv_frontend_fops);
    if (status < 0)
    {
        printk("NVRM: register_chrdev() failed!\n");
        nvidia_exit_module();
    }

    return status;
}
//
// Module exit: unregister the character device once the last nvidia module
// instance is going away, then tear down the core driver.
//
static void __exit nvidia_frontend_exit_module(void)
{
    /*
     * If this is the last nvidia_module to be unregistered, cleanup and
     * unregister char dev
     */
    // NOTE(review): this checks nv_num_instances == 1 (not 0); presumably
    // the frontend's own instance is still counted at this point — confirm
    // against the registration sequence.
    if (nv_num_instances == 1)
    {
        unregister_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend");
    }

    nvidia_exit_module();
}
module_init(nvidia_frontend_init_module);
module_exit(nvidia_frontend_exit_module);

View File

@@ -0,0 +1,47 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_FRONTEND_H_
#define _NV_FRONTEND_H_

#include "nvtypes.h"
#include "nv-linux.h"
#include "nv-register-module.h"

// Maximum number of nvidia module instances the frontend can multiplex.
#define NV_MAX_MODULE_INSTANCES 8

// Extract the device minor number from an inode.
#define NV_FRONTEND_MINOR_NUMBER(x) minor((x)->i_rdev)

// Control-device minors occupy the top of the minor range:
// instance N's control device is minor (MAX - N), so the control range is
// (MIN, MAX] with MIN = MAX - NV_MAX_MODULE_INSTANCES.
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX 255
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN (NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - \
                                              NV_MAX_MODULE_INSTANCES)

#define NV_FRONTEND_IS_CONTROL_DEVICE(x) ((x <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX) && \
                                          (x > NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN))

int nvidia_frontend_add_device(nvidia_module_t *, nv_linux_state_t *);
int nvidia_frontend_remove_device(nvidia_module_t *, nv_linux_state_t *);

// Minor-number table defined in nv-frontend.c.
extern nvidia_module_t *nv_minor_num_table[];

#endif

View File

@@ -0,0 +1,264 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "os_gpio.h"
// Flag for devm_gpio_request_one(): request the GPIO as an input.
#define NV_GPIOF_DIR_IN     (1 << 0)

/*!
 * @brief Mapping array of OS GPIO function ID to OS function name,
 * this name is used to get GPIO number from Device Tree.
 */
static const char *osMapGpioFunc[] = {
    [NV_OS_GPIO_FUNC_HOTPLUG_A] = "os_gpio_hotplug_a",
    [NV_OS_GPIO_FUNC_HOTPLUG_B] = "os_gpio_hotplug_b",
};
/*!
 * @brief Read the current logic level of a GPIO pin.
 *
 * @param[in]  nv        Per-GPU state (unused here).
 * @param[in]  pinNum    Kernel GPIO number.
 * @param[out] pinValue  Pin level (0 or 1) on success.
 *
 * @return NV_OK on success; NV_ERR_GENERIC if the kernel lacks
 *         gpio_get_value() or the read fails.
 */
NV_STATUS NV_API_CALL nv_gpio_get_pin_state
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 *pinValue
)
{
    int ret;

#if defined(NV_GPIO_GET_VALUE_PRESENT)
    ret = gpio_get_value(pinNum);
#else
    nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n");
    return NV_ERR_GENERIC;
#endif

    if (ret < 0)
    {
        nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
                  __func__, ret);
        return NV_ERR_GENERIC;
    }

    *pinValue = ret;

    return NV_OK;
}
/*!
 * @brief Drive a GPIO pin to the given logic level.
 *
 * Silently logs (but cannot report) failure when the kernel lacks
 * gpio_set_value(); the interface has no return value.
 *
 * @param[in] nv        Per-GPU state (unused here).
 * @param[in] pinNum    Kernel GPIO number.
 * @param[in] pinValue  Level to drive (0 or 1).
 */
void NV_API_CALL nv_gpio_set_pin_state
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 pinValue
)
{
#if defined(NV_GPIO_SET_VALUE_PRESENT)
    gpio_set_value(pinNum, pinValue);
#else
    nv_printf(NV_DBG_ERRORS, "gpio_set_value not present\n");
#endif
}
/*!
 * @brief Configure a GPIO pin as input or output.
 *
 * @param[in] nv         Per-GPU state (unused here).
 * @param[in] pinNum     Kernel GPIO number.
 * @param[in] direction  Non-zero → input; zero → output (driven low).
 *
 * @return NV_OK on success; NV_ERR_GENERIC if the required kernel helper is
 *         absent or the configuration fails.
 */
NV_STATUS NV_API_CALL nv_gpio_set_pin_direction
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 direction
)
{
    int ret;

    if (direction)
    {
#if defined(NV_GPIO_DIRECTION_INPUT_PRESENT)
        ret = gpio_direction_input(pinNum);
#else
        nv_printf(NV_DBG_ERRORS, "gpio_direction_input not present\n");
        return NV_ERR_GENERIC;
#endif
    }
    else
    {
        // Output pins are initialized low.
#if defined(NV_GPIO_DIRECTION_OUTPUT_PRESENT)
        ret = gpio_direction_output(pinNum, 0);
#else
        nv_printf(NV_DBG_ERRORS, "gpio_direction_output not present\n");
        return NV_ERR_GENERIC;
#endif
    }

    if (ret)
    {
        nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
                  __func__, ret);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
}
/*!
 * @brief Query a GPIO pin's direction.
 *
 * Currently a stub: it always returns NV_OK and never writes *direction
 * (see the TODO below), so callers must not rely on the output value.
 */
NV_STATUS NV_API_CALL nv_gpio_get_pin_direction
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 *direction
)
{
    /*!
     * TODO: Commenting out until gpio_get_direction wrapper
     * support is added in kernel.
     */
#if 0
    int ret;

    ret = nv_gpio_get_direction(pinNum);
    if (ret)
    {
        nv_printf(NV_DBG_ERRORS, "%s: failed with err: %d\n",
                  __func__, ret);
        return NV_ERR_GENERIC;
    }

    *direction = ret;
#endif

    return NV_OK;
}
/*!
 * @brief Resolve a GPIO function ID to a kernel GPIO number via Device Tree
 *        and claim the pin.
 *
 * Looks up the function's DT property name in osMapGpioFunc, reads the GPIO
 * number from the device's DT node, then requests the pin (as an input,
 * device-managed so it is released automatically on driver detach).
 *
 * @param[in]  nv        Per-GPU state (provides the struct device / DT node).
 * @param[in]  function  NV_OS_GPIO_FUNC_* index into osMapGpioFunc.
 * @param[out] pinNum    Resolved kernel GPIO number on success.
 *
 * @return NV_OK on success; NV_ERR_GENERIC if the kernel helpers are absent
 *         or the DT lookup / pin request fails.
 */
NV_STATUS NV_API_CALL nv_gpio_get_pin_number
(
    nv_state_t *nv,
    NvU32 function,
    NvU32 *pinNum
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;

#if defined(NV_OF_GET_NAME_GPIO_PRESENT)
    rc = of_get_named_gpio(nvl->dev->of_node, osMapGpioFunc[function], 0);
#else
    nv_printf(NV_DBG_ERRORS, "of_get_named_gpio not present\n");
    return NV_ERR_GENERIC;
#endif

    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "of_get_name_gpio failed for gpio - %s, rc - %d\n",
                  osMapGpioFunc[function], rc);
        return NV_ERR_GENERIC;
    }

    *pinNum = rc;

#if defined(NV_DEVM_GPIO_REQUEST_ONE_PRESENT)
    rc = devm_gpio_request_one(nvl->dev, *pinNum, NV_GPIOF_DIR_IN,
                               osMapGpioFunc[function]);
#else
    nv_printf(NV_DBG_ERRORS, "devm_gpio_request_one not present\n");
    return NV_ERR_GENERIC;
#endif

    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "request gpio failed for gpio - %s, rc - %d\n",
                  osMapGpioFunc[function], rc);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
}
/*!
 * @brief Check whether the interrupt currently being serviced belongs to
 *        the given GPIO pin and matches the expected level.
 *
 * @param[in] nv         Per-GPU state.
 * @param[in] pinNum     Kernel GPIO number to test against.
 * @param[in] direction  Expected pin level (0 or 1) for a match.
 *
 * @return NV_TRUE only if the current IRQ is a GPIO IRQ, raised by pinNum,
 *         and the pin currently reads `direction`; NV_FALSE otherwise.
 */
NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status
(
    nv_state_t *nv,
    NvU32 pinNum,
    NvU32 direction
)
{
    NvU32 irqGpioPin;
    NvU32 pinValue;

    if (nv_get_current_irq_type(nv) != NV_SOC_IRQ_GPIO_TYPE)
    {
        return NV_FALSE;
    }

    // The IRQ's private data carries the GPIO pin that raised it.
    nv_get_current_irq_priv_data(nv, &irqGpioPin);
    if (pinNum != irqGpioPin)
    {
        return NV_FALSE;
    }

#if defined(NV_GPIO_GET_VALUE_PRESENT)
    pinValue = gpio_get_value(pinNum);
#else
    nv_printf(NV_DBG_ERRORS, "gpio_get_value not present\n");
    return NV_FALSE;
#endif

    if (pinValue != direction)
    {
        return NV_FALSE;
    }

    return NV_TRUE;
}
/*!
 * @brief Register an interrupt for a GPIO pin.
 *
 * Translates the GPIO number to an IRQ number and registers an SoC IRQ for
 * it, triggering on both edges (see the TODO below for why trigger_level is
 * currently ignored).
 *
 * @param[in] nv             Per-GPU state.
 * @param[in] pinNum         Kernel GPIO number.
 * @param[in] trigger_level  Requested trigger level — currently ignored.
 *
 * @return NV_OK on success; NV_ERR_GENERIC if gpio_to_irq() is absent or
 *         IRQ registration fails (including the expected second-registration
 *         failure described below).
 */
NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt
(
    nv_state_t * nv,
    NvU32 pinNum,
    NvU32 trigger_level
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;
    int irq_num;

#if defined(NV_GPIO_TO_IRQ_PRESENT)
    irq_num = gpio_to_irq(pinNum);
#else
    nv_printf(NV_DBG_ERRORS, "gpio_to_irq not present\n");
    return NV_ERR_GENERIC;
#endif

    /*
     * TODO: Ignoring trigger_level as RM calls this function twice to set
     * the level (rising/falling separately) of interrupt for the same GPIO
     * pin, so hardcoding the trigger_level to rising+falling during the
     * first registration; the second registration fails as the interrupt
     * is already registered. For initial GPIO support let the second
     * registration fail; a follow-up patch will check whether the
     * interrupt is already registered and skip it the second time.
     */
    rc = nv_request_soc_irq(nvl, irq_num, NV_SOC_IRQ_GPIO_TYPE,
                            (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
                            IRQF_ONESHOT), pinNum);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "IRQ registration failed for gpio - %d, rc - %d\n",
                  pinNum, rc);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
}

View File

@@ -0,0 +1,80 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(NV_LINUX_NVHOST_T194_H_PRESENT)
#include <linux/nvhost.h>
#include <linux/nvhost_t194.h>
/*
 * Query the host1x syncpoint aperture from the nvhost driver.
 *
 * @param[in]  syncpointId Syncpoint whose byte offset is reported.
 * @param[out] physAddr    Base physical address of the syncpoint aperture.
 * @param[out] limit       Byte offset of syncpoint 1 (aperture stride limit).
 * @param[out] offset      Byte offset of 'syncpointId' within the aperture.
 *
 * Returns NV_OK on success, NV_ERR_INVALID_DEVICE if no default nvhost
 * device exists.
 */
NV_STATUS nv_get_syncpoint_aperture
(
    NvU32 syncpointId,
    NvU64 *physAddr,
    NvU64 *limit,
    NvU32 *offset
)
{
    struct platform_device *host1x_pdev = NULL;
    /*
     * Fix: initialize base/size so *physAddr is not read from uninitialized
     * stack memory when the aperture-query symbol is not exported.
     */
    phys_addr_t base = 0;
    size_t size = 0;

#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_get_default_device
    host1x_pdev = nvhost_get_default_device();
    if (host1x_pdev == NULL)
    {
        return NV_ERR_INVALID_DEVICE;
    }
#endif

#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_aperture
    nvhost_syncpt_unit_interface_get_aperture(
        host1x_pdev, &base, &size);
#endif
    *physAddr = base;
    (void)size; /* aperture size is queried but not reported to the caller */

#if NV_IS_EXPORT_SYMBOL_PRESENT_nvhost_syncpt_unit_interface_get_byte_offset
    *limit = nvhost_syncpt_unit_interface_get_byte_offset(1);
    *offset = nvhost_syncpt_unit_interface_get_byte_offset(syncpointId);
#endif
    return NV_OK;
}
#else
/*
 * Stub used when the nvhost headers are unavailable: syncpoint apertures
 * cannot be queried, so report the feature as unsupported. Output
 * parameters are left untouched.
 */
NV_STATUS nv_get_syncpoint_aperture
(
    NvU32 syncpointId,
    NvU64 *physAddr,
    NvU64 *limit,
    NvU32 *offset
)
{
    return NV_ERR_NOT_SUPPORTED;
}
#endif

View File

@@ -0,0 +1,552 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2005-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/i2c.h>
#include "os-interface.h"
#include "nv-linux.h"
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
 * i2c_algorithm master_xfer callback: forward each message to the RM I2C
 * implementation. Stops at the first failing message. Returns the number of
 * messages processed on success, or a negative errno (-EINVAL for
 * unsupported flags, -EIO for RM transfer failures).
 */
static int nv_i2c_algo_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num)
{
    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
    nvidia_stack_t *stack = NULL;
    NV_STATUS status = NV_OK;
    unsigned int idx;
    int ret;
    const unsigned int supported_i2c_flags = I2C_M_RD
#if defined(I2C_M_DMA_SAFE)
        | I2C_M_DMA_SAFE
#endif
        ;

    ret = nv_kmem_cache_alloc_stack(&stack);
    if (ret != 0)
    {
        return ret;
    }

    ret = -EIO;
    for (idx = 0; idx < (unsigned int)num; idx++)
    {
        if ((msgs[idx].flags & ~supported_i2c_flags) != 0)
        {
            /* we only support basic I2C reads/writes, reject any other commands */
            ret = -EINVAL;
            nv_printf(NV_DBG_ERRORS, "NVRM: Unsupported I2C flags used. (flags:0x%08x)\n",
                      msgs[idx].flags);
            status = NV_ERR_INVALID_ARGUMENT;
        }
        else
        {
            status = rm_i2c_transfer(stack, nv, (void *)adapter,
                                     (msgs[idx].flags & I2C_M_RD) ?
                                         NV_I2C_CMD_READ : NV_I2C_CMD_WRITE,
                                     (NvU8)(msgs[idx].addr & 0x7f), 0,
                                     (NvU32)(msgs[idx].len & 0xffffUL),
                                     (NvU8 *)msgs[idx].buf);
        }
        if (status != NV_OK)
        {
            break;
        }
    }

    nv_kmem_cache_free_stack(stack);
    return (status == NV_OK) ? num : ret;
}
/*
 * i2c_algorithm smbus_xfer callback: translate SMBus protocol operations
 * into RM I2C transfers.
 *
 * Returns 0 on success, -EINVAL for unsupported 'size' values, -EIO for RM
 * transfer failures, or the nv_kmem_cache_alloc_stack() error code.
 */
static int nv_i2c_algo_smbus_xfer(
    struct i2c_adapter *adapter,
    u16 addr,
    unsigned short flags,
    char read_write,
    u8 command,
    int size,
    union i2c_smbus_data *data
)
{
    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
    int rc;
    NV_STATUS rmStatus = NV_OK;
    nvidia_stack_t *sp = NULL;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        return rc;
    }

    rc = -EIO;
    switch (size)
    {
        case I2C_SMBUS_QUICK:
            /* Zero-length read/write: address probe only. */
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (read_write == I2C_SMBUS_READ) ?
                                           NV_I2C_CMD_SMBUS_QUICK_READ :
                                           NV_I2C_CMD_SMBUS_QUICK_WRITE,
                                       (NvU8)(addr & 0x7f), 0, 0, NULL);
            break;

        case I2C_SMBUS_BYTE:
            if (read_write == I2C_SMBUS_READ)
            {
                rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                           NV_I2C_CMD_READ,
                                           (NvU8)(addr & 0x7f), 0, 1,
                                           (NvU8 *)&data->byte);
            }
            else
            {
                /*
                 * For an SMBus byte write, 'command' is itself the data
                 * byte. Fix: local renamed from 'data', which shadowed the
                 * 'data' parameter above.
                 */
                u8 send_byte = command;
                rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                           NV_I2C_CMD_WRITE,
                                           (NvU8)(addr & 0x7f), 0, 1,
                                           (NvU8 *)&send_byte);
            }
            break;

        case I2C_SMBUS_BYTE_DATA:
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (read_write == I2C_SMBUS_READ) ?
                                           NV_I2C_CMD_SMBUS_READ :
                                           NV_I2C_CMD_SMBUS_WRITE,
                                       (NvU8)(addr & 0x7f), (NvU8)command, 1,
                                       (NvU8 *)&data->byte);
            break;

        case I2C_SMBUS_WORD_DATA:
            /* Stage the word little-endian in block[1..2] for the transfer. */
            if (read_write != I2C_SMBUS_READ)
            {
                data->block[1] = (data->word & 0xff);
                data->block[2] = (data->word >> 8);
            }
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (read_write == I2C_SMBUS_READ) ?
                                           NV_I2C_CMD_SMBUS_READ :
                                           NV_I2C_CMD_SMBUS_WRITE,
                                       (NvU8)(addr & 0x7f), (NvU8)command, 2,
                                       (NvU8 *)&data->block[1]);
            /* Reassemble the little-endian word for the caller on reads. */
            if (read_write == I2C_SMBUS_READ)
            {
                data->word = ((NvU16)data->block[1]) |
                             ((NvU16)data->block[2] << 8);
            }
            break;

        case I2C_SMBUS_BLOCK_DATA:
            rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter,
                                       (read_write == I2C_SMBUS_READ) ?
                                           NV_I2C_CMD_SMBUS_BLOCK_READ :
                                           NV_I2C_CMD_SMBUS_BLOCK_WRITE,
                                       (NvU8)(addr & 0x7f), (NvU8)command,
                                       sizeof(data->block),
                                       (NvU8 *)data->block);
            break;

        default:
            rc = -EINVAL;
            rmStatus = NV_ERR_INVALID_ARGUMENT;
    }

    nv_kmem_cache_free_stack(sp);
    return (rmStatus != NV_OK) ? rc : 0;
}
/*
 * i2c_algorithm functionality callback: plain I2C is always offered; the
 * SMBus protocol bits are added only when RM reports SMBus capability.
 * Returns 0 if a temporary RM stack cannot be allocated.
 */
static u32 nv_i2c_algo_functionality(struct i2c_adapter *adapter)
{
    nv_state_t *nv = (nv_state_t *)adapter->algo_data;
    nvidia_stack_t *stack = NULL;
    u32 caps;

    if (nv_kmem_cache_alloc_stack(&stack) != 0)
    {
        return 0;
    }

    caps = I2C_FUNC_I2C;
    if (rm_i2c_is_smbus_capable(stack, nv, adapter))
    {
        caps |= (I2C_FUNC_SMBUS_QUICK |
                 I2C_FUNC_SMBUS_BYTE |
                 I2C_FUNC_SMBUS_BYTE_DATA |
                 I2C_FUNC_SMBUS_WORD_DATA |
                 I2C_FUNC_SMBUS_BLOCK_DATA);
    }

    nv_kmem_cache_free_stack(stack);
    return caps;
}
/* I2C algorithm callbacks backed by the RM I2C implementation above. */
static struct i2c_algorithm nv_i2c_algo = {
.master_xfer = nv_i2c_algo_master_xfer,
.smbus_xfer = nv_i2c_algo_smbus_xfer,
.functionality = nv_i2c_algo_functionality,
};
/*
 * Template copied for each adapter created by nv_i2c_add_adapter();
 * algo_data is filled in with the owning nv_state_t at creation time.
 */
struct i2c_adapter nv_i2c_adapter_prototype = {
.owner = THIS_MODULE,
.algo = &nv_i2c_algo,
.algo_data = NULL,
};
/*
 * Allocate and register a Linux I2C adapter for GPU I2C port 'port'.
 * Returns an opaque pointer to the registered struct i2c_adapter, or NULL
 * on allocation/registration failure.
 */
void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct i2c_adapter *adapter = NULL;

    /* Allocate a fresh adapter and seed it from the prototype. */
    if (os_alloc_mem((void **)&adapter, sizeof(struct i2c_adapter)) != NV_OK)
    {
        return NULL;
    }
    os_mem_copy(adapter, &nv_i2c_adapter_prototype, sizeof(struct i2c_adapter));
    adapter->dev.parent = nvl->dev;

    if (nvl->pci_dev != NULL)
    {
        snprintf(adapter->name, sizeof(adapter->name),
                 "NVIDIA i2c adapter %u at %x:%02x.%u", port, nv->pci_info.bus,
                 nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn));
    }
    else
    {
        snprintf(adapter->name, sizeof(adapter->name),
                 "NVIDIA SOC i2c adapter %u", port);
    }

    /* Let the algorithm callbacks find the owning GPU state. */
    adapter->algo_data = (void *)nv;

    /* Hand the adapter to the I2C core; undo the allocation on failure. */
    if (i2c_add_adapter(adapter) != 0)
    {
        os_free_mem(adapter);
        return NULL;
    }
    return (void *)adapter;
}
/*
 * Unregister and free an adapter previously returned by
 * nv_i2c_add_adapter(). Safe to call with data == NULL.
 */
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
{
    struct i2c_adapter *adapter = (struct i2c_adapter *)data;

    if (adapter == NULL)
    {
        return;
    }
    /* Release with the I2C core, then free the backing allocation. */
    i2c_del_adapter(adapter);
    os_free_mem(adapter);
}
/*
 * Create an i2c_client for 'address' on the given logical port and record
 * it in the per-port client table. Returns the client, or NULL on failure.
 */
static struct i2c_client * nv_i2c_register_client(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct i2c_adapter *adapter;
    struct i2c_client *client = NULL;
    int slot;
    struct i2c_board_info i2c_dev_info = {
        .type = "tegra_display",
        .addr = address,
    };

    /* Get the adapter using i2c port */
    adapter = i2c_get_adapter(linuxI2CSwPort);
    if (adapter == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)",
                  linuxI2CSwPort);
        return NULL;
    }

#if defined(NV_I2C_NEW_CLIENT_DEVICE_PRESENT)
    client = i2c_new_client_device(adapter, &i2c_dev_info);
#else
    nv_printf(NV_DBG_ERRORS, "nv_i2c_new_device not present\n");
#endif

    if (client == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to register client for address(0x%x)",
                  address);
        i2c_put_adapter(adapter);
        return NULL;
    }
    /* Drop the adapter reference taken above. */
    i2c_put_adapter(adapter);

    /* Save the Port and i2c client in the first free slot. */
    nvl->i2c_clients[linuxI2CSwPort].port = linuxI2CSwPort;
    for (slot = 0; slot < MAX_CLIENTS_PER_ADAPTER; slot++)
    {
        if (nvl->i2c_clients[linuxI2CSwPort].pOsClient[slot] == NULL)
        {
            nvl->i2c_clients[linuxI2CSwPort].pOsClient[slot] = client;
            break;
        }
    }
    return client;
}
/*
 * Look up a previously registered client for (port, address).
 * Returns NULL if no matching client has been registered.
 */
static struct i2c_client *nv_i2c_get_registered_client(
    nv_state_t *nv,
    NvU32 linuxI2CSwPort,
    NvU8 address)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int slot;

    /*
     * Clients are stored contiguously from slot 0, so the first empty slot
     * marks the end of the registered set.
     */
    for (slot = 0; slot < MAX_CLIENTS_PER_ADAPTER; slot++)
    {
        struct i2c_client *client =
            (struct i2c_client *)nvl->i2c_clients[linuxI2CSwPort].pOsClient[slot];
        if (client == NULL)
        {
            break;
        }
        if ((NvU8)client->addr == address)
        {
            return client;
        }
    }
    return NULL;
}
/*
 * Perform an I2C transfer on a Tegra I2C controller via the Linux
 * i2c_transfer() API, registering a client for (port, address) on first use.
 *
 * @param[in] nv               Per-GPU state.
 * @param[in] physicalI2CPort  1-based physical controller number from RM.
 * @param[in] address          8-bit-style slave address (converted to 7-bit).
 * @param[in] nv_msgs          Messages to send/receive.
 * @param[in] num_msgs         Number of messages.
 *
 * Returns NV_OK on success, NV_ERR_INVALID_ARGUMENT for a bad port,
 * NV_ERR_NO_MEMORY on allocation failure, or NV_ERR_GENERIC otherwise.
 */
NV_STATUS NV_API_CALL nv_i2c_transfer(
    nv_state_t *nv,
    NvU32 physicalI2CPort,
    NvU8 address,
    nv_i2c_msg_t *nv_msgs,
    int num_msgs
)
{
    struct i2c_client *client;
    struct i2c_msg *msgs;
    int count;
    int rc;
    NV_STATUS status = NV_OK;
    NvU32 linuxI2CSwPort;

    //
    // RM style slave address is 8-bit addressing, but Linux use 7-bit
    // addressing, so convert to 7-bit addressing format.
    //
    address = address >> 1;

    //
    // Linux Tegra I2C controller driver uses logical port(controller) number
    // where logical port number of I2C1(Gen1) controller is 0, logical port
    // number for I2C2(Gen2) controller is 1 and so on.
    // But RM passes I2C physical port(controller) number i.e RM passes "1"
    // for I2C1(Gen1), 2 for I2C2(Gen2), etc. So convert physical port number
    // to logical port number(linuxI2CSwPort).
    //
    linuxI2CSwPort = physicalI2CPort - 1;

    //
    // Check if its valid port. linuxI2CSwPort is unsigned, so
    // physicalI2CPort == 0 wraps around and is rejected here too (the former
    // ">= 0" half of this test was always true for an unsigned value).
    //
    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
    {
        nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
        return NV_ERR_INVALID_ARGUMENT;
    }

    /* Reuse an existing client for this port/address, or register one. */
    client = nv_i2c_get_registered_client(nv, linuxI2CSwPort, address);
    if (client == NULL)
    {
        client = nv_i2c_register_client(nv, linuxI2CSwPort, address);
        if (client == NULL)
        {
            nv_printf(NV_DBG_ERRORS, "i2c client register failed for addr:0x%x\n",
                      address);
            return NV_ERR_GENERIC;
        }
    }

    /* kcalloc zero-fills and checks num_msgs * sizeof(*msgs) for overflow. */
    msgs = kcalloc(num_msgs, sizeof(*msgs), GFP_KERNEL);
    if (msgs == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "i2c message allocation failed\n");
        return NV_ERR_NO_MEMORY;
    }

    for (count = 0; count < num_msgs; count++)
    {
        msgs[count].addr = client->addr;
        msgs[count].flags = nv_msgs[count].flags;
        msgs[count].len = nv_msgs[count].len;
        msgs[count].buf = nv_msgs[count].buf;
    }

    /* i2c_transfer() returns the number of messages completed on success. */
    rc = i2c_transfer(client->adapter, msgs, num_msgs);
    if (rc != num_msgs)
    {
        nv_printf(NV_DBG_ERRORS, "i2c transfer failed for addr:0x%x",
                  address);
        status = NV_ERR_GENERIC;
    }
    kfree(msgs);
    return status;
}
/*
 * Unregister every i2c_client recorded in the per-port client table and
 * clear the table entries.
 */
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int port;
    int slot;

    /* Walk every port/slot and release any client registered there. */
    for (port = 0; port < MAX_TEGRA_I2C_PORTS; port++)
    {
        for (slot = 0; slot < MAX_CLIENTS_PER_ADAPTER; slot++)
        {
            struct i2c_client *client =
                (struct i2c_client *)nvl->i2c_clients[port].pOsClient[slot];
            if (client == NULL)
            {
                continue;
            }
#if defined(NV_I2C_UNREGISTER_DEVICE_PRESENT)
            i2c_unregister_device(client);
#else
            nv_printf(NV_DBG_ERRORS, "i2c_unregister_device not present\n");
#endif
            nvl->i2c_clients[port].pOsClient[slot] = NULL;
        }
    }
}
/*
 * Read the SCL/SDA line states of a Tegra I2C controller.
 *
 * @param[in]  nv               Per-GPU state (unused; kept for API symmetry).
 * @param[in]  physicalI2CPort  1-based physical controller number from RM.
 * @param[out] scl              Receives the SCL line state.
 * @param[out] sda              Receives the SDA line state.
 *
 * Returns NV_OK on success, NV_ERR_INVALID_ARGUMENT for a bad port, or
 * NV_ERR_GENERIC if the adapter lookup or status query fails.
 */
NV_STATUS NV_API_CALL nv_i2c_bus_status(
    nv_state_t *nv,
    NvU32 physicalI2CPort,
    NvS32 *scl,
    NvS32 *sda)
{
    NvU32 linuxI2CSwPort;
    struct i2c_adapter *i2c_adapter;
    int ret;

    //
    // Linux Tegra I2C controller driver uses logical port(controller) number
    // where logical port number of I2C1(Gen1) controller is 0, logical port
    // number for I2C2(Gen2) controller is 1 and so on.
    // But RM passes I2C physical port(controller) number i.e RM passes "1"
    // for I2C1(Gen1), 2 for I2C2(Gen2), etc. So convert physical port number
    // to logical port number(linuxI2CSwPort).
    //
    linuxI2CSwPort = physicalI2CPort - 1;

    //
    // Check if its valid port. linuxI2CSwPort is unsigned, so
    // physicalI2CPort == 0 wraps around and is rejected here too.
    //
    if (linuxI2CSwPort >= MAX_TEGRA_I2C_PORTS)
    {
        nv_printf(NV_DBG_ERRORS, "Invalid I2C port:%d\n", linuxI2CSwPort);
        return NV_ERR_INVALID_ARGUMENT;
    }

    /* Get the adapter using i2c port */
    i2c_adapter = i2c_get_adapter(linuxI2CSwPort);
    if (i2c_adapter == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "Unable to get i2c adapter for port(%d)",
                  linuxI2CSwPort);
        /* Fix: previously returned NULL from an NV_STATUS function. */
        return NV_ERR_GENERIC;
    }

    ret = i2c_bus_status(i2c_adapter, scl, sda);
    /* Fix: drop the adapter reference on the error path as well. */
    i2c_put_adapter(i2c_adapter);
    if (ret < 0)
    {
        nv_printf(NV_DBG_ERRORS, "i2c_bus_status failed:%d\n", ret);
        return NV_ERR_GENERIC;
    }
    return NV_OK;
}
#else // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
 * Stub implementations used when the kernel is built without I2C support
 * (neither CONFIG_I2C nor CONFIG_I2C_MODULE).
 */
void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data)
{
}
/* No adapter can be created without kernel I2C support. */
void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port)
{
return NULL;
}
/*
 * NOTE(review): reports NV_OK even though no transfer occurs — confirm
 * callers treat this path as best-effort.
 */
NV_STATUS NV_API_CALL nv_i2c_transfer(
nv_state_t *nv,
NvU32 physicalI2CPort,
NvU8 address,
nv_i2c_msg_t *nv_msgs,
int num_msgs
)
{
return NV_OK;
}
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *nv)
{
}
/* Bus status cannot be queried without kernel I2C support. */
NV_STATUS NV_API_CALL nv_i2c_bus_status(
nv_state_t *nv,
NvU32 physicalI2CPort,
NvS32 *scl,
NvS32 *sda)
{
return NV_ERR_GENERIC;
}
#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)

View File

@@ -0,0 +1,448 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* nv-ibmnpu.c - interface with the ibmnpu (IBM NVLink Processing Unit) "module"
*/
#include "nv-linux.h"
#if defined(NVCPU_PPC64LE)
#include "nv-ibmnpu.h"
#include "nv-rsync.h"
/*
* Temporary query to get the L1D cache block size directly from the device
* tree for the offline cache flush workaround, since the ppc64_caches symbol
* is unavailable to us.
*/
const NvU32 P9_L1D_CACHE_DEFAULT_BLOCK_SIZE = 0x80; /* 128-byte blocks */
#if defined(NV_OF_GET_PROPERTY_PRESENT)
/*
 * Return the CPU L1 data cache block size in bytes, read from the
 * "d-cache-block-size" device-tree property of the first "cpu" node, or the
 * P9 default when the node/property is unavailable.
 */
static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
{
    const __be32 *block_size_prop;
    NvU32 block_size = P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;

    /*
     * Attempt to look up the block size from device tree. If unavailable,
     * just return the default that we see on these systems.
     */
    struct device_node *cpu = of_find_node_by_type(NULL, "cpu");
    if (!cpu)
    {
        return block_size;
    }

    block_size_prop = of_get_property(cpu, "d-cache-block-size", NULL);
    if (block_size_prop)
    {
        block_size = be32_to_cpu(*block_size_prop);
    }

    /*
     * Fix: of_find_node_by_type() returns the node with an elevated
     * reference count; release it to avoid leaking the reference.
     */
    of_node_put(cpu);
    return block_size;
}
#else
/* Fallback when of_get_property() is unavailable: use the P9 default. */
static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
{
return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
}
#endif
/*
 * GPU device memory can be exposed to the kernel as NUMA node memory via the
 * IBMNPU devices associated with the GPU. The platform firmware will specify
 * the parameters of where the memory lives in the system address space via
 * firmware properties on the IBMNPU devices. These properties specify what
 * memory can be accessed through the IBMNPU device, and the driver can online
 * a GPU device's memory into the range accessible by its associated IBMNPU
 * devices.
 *
 * This function calls over to the IBMNPU driver to query the parameters from
 * firmware, and validates that the resulting parameters are acceptable.
 */
static void nv_init_ibmnpu_numa_info(nv_state_t *nv)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
nv_npu_numa_info_t *npu_numa_info = &nvl->npu->numa_info;
/* Memory config is queried through the first attached IBMNPU device. */
struct pci_dev *npu_dev = nvl->npu->devs[0];
NvU64 spa, gpa, aper_size;
/*
 * Terminology:
 * - system physical address (spa): 47-bit NVIDIA physical address, which
 * is the CPU real address with the NVLink address compression scheme
 * already applied in firmware.
 * - guest physical address (gpa): 56-bit physical address as seen by the
 * operating system. This is the base address that we should use for
 * onlining device memory.
 */
nvl->numa_info.node_id = ibmnpu_device_get_memory_config(npu_dev, &spa, &gpa,
&aper_size);
if (nvl->numa_info.node_id == NUMA_NO_NODE)
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv, "No NUMA memory aperture found\n");
return;
}
/* Validate that the compressed system physical address is not too wide */
if (spa & (~(BIT_ULL(nv_volta_dma_addr_size) - 1)))
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"Invalid NUMA memory system pa 0x%llx"
" on IBM-NPU device %04x:%02x:%02x.%u\n",
spa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
goto invalid_numa_config;
}
/*
 * Validate that the guest physical address is aligned to 128GB.
 * This alignment requirement comes from the Volta address space
 * size on POWER9.
 */
if (!IS_ALIGNED(gpa, BIT_ULL(nv_volta_addr_space_width)))
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"Invalid alignment in NUMA memory guest pa 0x%llx"
" on IBM-NPU device %04x:%02x:%02x.%u\n",
gpa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
goto invalid_numa_config;
}
/* Validate that the aperture can map all of the device's framebuffer */
if (aper_size < nv->fb->size)
{
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"Insufficient NUMA memory aperture size 0x%llx"
" on IBM-NPU device %04x:%02x:%02x.%u (0x%llx required)\n",
aper_size, NV_PCI_DOMAIN_NUMBER(npu_dev),
NV_PCI_BUS_NUMBER(npu_dev), NV_PCI_SLOT_NUMBER(npu_dev),
PCI_FUNC(npu_dev->devfn), nv->fb->size);
goto invalid_numa_config;
}
/* All checks passed: record the validated addresses. */
npu_numa_info->compr_sys_phys_addr = spa;
npu_numa_info->guest_phys_addr = gpa;
/*
 * Onlining is driven from user space (see the regkey); mark the memory
 * offline for now, or disable NUMA entirely if the regkey is off.
 */
if (NVreg_EnableUserNUMAManagement)
{
NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE);
}
else
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv, "User-mode NUMA onlining disabled.\n");
nvl->numa_info.node_id = NUMA_NO_NODE;
}
NV_DEV_PRINTF(NV_DBG_SETUP, nv, "NUMA memory aperture: "
"[spa = 0x%llx, gpa = 0x%llx, aper_size = 0x%llx]\n",
spa, gpa, aper_size);
/* Get the CPU's L1D cache block size for offlining cache flush */
npu_numa_info->l1d_cache_block_size = nv_ibm_get_cpu_l1d_cache_block_size();
return;
invalid_numa_config:
NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
"NUMA memory aperture disabled due to invalid firmware configuration\n");
nvl->numa_info.node_id = NUMA_NO_NODE;
}
/*
 * Discover the IBMNPU devices attached to this GPU, record them in
 * nvl->npu, query the generation-register space of the first device, and
 * set up the NUMA aperture info. No-op if pnv_pci_get_npu_dev() is absent
 * or no IBMNPU device is attached.
 */
void nv_init_ibmnpu_info(nv_state_t *nv)
{
#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT)
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
struct pci_dev *npu_dev = pnv_pci_get_npu_dev(nvl->pci_dev, 0);
NvU8 dev_count;
if (!npu_dev)
{
return;
}
if (os_alloc_mem((void **)&nvl->npu, sizeof(nv_ibmnpu_info_t)) != NV_OK)
{
return;
}
os_mem_set(nvl->npu, 0, sizeof(nv_ibmnpu_info_t));
/* Find any other IBMNPU devices attached to this GPU */
for (nvl->npu->devs[0] = npu_dev, dev_count = 1;
dev_count < NV_MAX_ATTACHED_IBMNPUS; dev_count++)
{
nvl->npu->devs[dev_count] = pnv_pci_get_npu_dev(nvl->pci_dev, dev_count);
if (!nvl->npu->devs[dev_count])
{
break;
}
}
nvl->npu->dev_count = dev_count;
/*
 * If we run out of space for IBMNPU devices, NV_MAX_ATTACHED_IBMNPUS will
 * need to be bumped.
 */
WARN_ON((dev_count == NV_MAX_ATTACHED_IBMNPUS) &&
pnv_pci_get_npu_dev(nvl->pci_dev, dev_count));
/* Query the generation-register space from the first attached device. */
ibmnpu_device_get_genregs_info(npu_dev, &nvl->npu->genregs);
if (nvl->npu->genregs.size > 0)
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv,
"IBM-NPU device %04x:%02x:%02x.%u associated with GPU "
" has a generation register space 0x%llx-0x%llx\n",
NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn),
nvl->npu->genregs.start_addr,
nvl->npu->genregs.start_addr + nvl->npu->genregs.size - 1);
}
else
{
NV_DEV_PRINTF(NV_DBG_SETUP, nv,
"IBM-NPU device %04x:%02x:%02x.%u associated with GPU "
"does not support generation registers\n",
NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
}
nv_init_ibmnpu_numa_info(nv);
#endif
}
/* Free the per-GPU IBMNPU state allocated by nv_init_ibmnpu_info(). */
void nv_destroy_ibmnpu_info(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    /* Nothing to do if no IBMNPU state was ever allocated. */
    if (nvl->npu == NULL)
    {
        return;
    }
    os_free_mem(nvl->npu);
    nvl->npu = NULL;
}
/*
 * Initialize every IBMNPU device attached to this GPU. On any failure,
 * unregisters the devices initialized so far and returns -EIO; returns 0
 * otherwise (including when no IBMNPU state exists).
 */
int nv_init_ibmnpu_devices(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU8 idx;

    if (nvl->npu == NULL)
    {
        return 0;
    }

    for (idx = 0; idx < nvl->npu->dev_count; idx++)
    {
        struct pci_dev *dev = nvl->npu->devs[idx];

        NV_DEV_PRINTF(NV_DBG_SETUP, nv,
            "Initializing IBM-NPU device %04x:%02x:%02x.%u\n",
            NV_PCI_DOMAIN_NUMBER(dev), NV_PCI_BUS_NUMBER(dev),
            NV_PCI_SLOT_NUMBER(dev), PCI_FUNC(dev->devfn));

        if (ibmnpu_init_device(dev) != NVL_SUCCESS)
        {
            /* Roll back everything initialized so far. */
            nv_unregister_ibmnpu_devices(nv);
            return -EIO;
        }
        nvl->npu->initialized_dev_count++;
    }
    return 0;
}
/*
 * Unregister every IBMNPU device that completed ibmnpu_init_device() and
 * reset the initialized count.
 */
void nv_unregister_ibmnpu_devices(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU8 idx;

    if (nvl->npu == NULL)
    {
        return;
    }

    for (idx = 0; idx < nvl->npu->initialized_dev_count; idx++)
    {
        struct pci_dev *dev = nvl->npu->devs[idx];

        NV_DEV_PRINTF(NV_DBG_SETUP, nv,
            "Unregistering IBM-NPU device %04x:%02x:%02x.%u\n",
            NV_PCI_DOMAIN_NUMBER(dev), NV_PCI_BUS_NUMBER(dev),
            NV_PCI_SLOT_NUMBER(dev), PCI_FUNC(dev->devfn));

        ibmnpu_unregister_device(dev);
    }
    nvl->npu->initialized_dev_count = 0;
}
/*
 * Report the IBMNPU generation-register space. Each output pointer is
 * optional (may be NULL). Returns NV_ERR_NOT_SUPPORTED when no IBMNPU is
 * attached or the device exposes no generation registers.
 */
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr,
                                                NvU64 *size, void **device)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if ((nvl->npu == NULL) || (nvl->npu->genregs.size == 0))
    {
        return NV_ERR_NOT_SUPPORTED;
    }
    if (addr != NULL)
    {
        *addr = nvl->npu->genregs.start_addr;
    }
    if (size != NULL)
    {
        *size = nvl->npu->genregs.size;
    }
    if (device != NULL)
    {
        *device = (void *)nvl->npu->devs[0];
    }
    return NV_OK;
}
/*
 * Query the rsync relaxed-ordering mode. Only supported when a
 * genregs-capable IBMNPU device is attached.
 */
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv,
                                                          NvBool *mode)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if ((nvl->npu == NULL) || (nvl->npu->genregs.size == 0))
    {
        return NV_ERR_NOT_SUPPORTED;
    }
    *mode = nv_get_rsync_relaxed_ordering_mode(nv);
    return NV_OK;
}
/*
 * Wait for an rsync to complete; a no-op unless a genregs-capable IBMNPU
 * device is attached.
 */
void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if ((nvl->npu != NULL) && (nvl->npu->genregs.size != 0))
    {
        nv_wait_for_rsync(nv);
    }
}
/*
 * Return the chip id of the first attached IBMNPU device, or -1 if none is
 * attached.
 */
int nv_get_ibmnpu_chip_id(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    return (nvl->npu == NULL) ? -1 :
        ibmnpu_device_get_chip_id(nvl->npu->devs[0]);
}
/*
 * Flush the CPU caches for a virtual address range backed by NUMA-onlined
 * GPU memory, one L1D cache block at a time via the PPC 'dcbf' (data cache
 * block flush) instruction.
 *
 * NOTE(review): dereferences nvl->npu->numa_info without a NULL check —
 * assumes callers only reach this on the PPC64LE path after
 * nv_init_ibmnpu_info() allocated nvl->npu; confirm.
 */
void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size)
{
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
NvU64 offset, cbsize;
/*
 * The range is commonly an ioremap()ed mapping of the GPU's ATS range and
 * needs to be compared against the created mappings. Alternatively, kernel
 * page tables can be dumped through sysfs if CONFIG_PPC_PTDUMP is enabled.
 */
NV_DEV_PRINTF(NV_DBG_INFO, nv,
"Flushing CPU virtual range [0x%llx, 0x%llx)\n",
cpu_virtual, cpu_virtual + size);
cbsize = nvl->npu->numa_info.l1d_cache_block_size;
CACHE_FLUSH();
/* Force eviction of any cache lines from the NUMA-onlined region. */
for (offset = 0; offset < size; offset += cbsize)
{
asm volatile("dcbf %0,%1" :: "r" (cpu_virtual), "r" (offset) : "memory");
/* Reschedule if necessary to avoid lockup warnings */
cond_resched();
}
CACHE_FLUSH();
}
#else
/*
 * Stub implementations for non-PPC64LE builds: IBMNPU devices only exist
 * on POWER9 systems, so these report "not supported" or do nothing.
 */
void nv_init_ibmnpu_info(nv_state_t *nv)
{
}
void nv_destroy_ibmnpu_info(nv_state_t *nv)
{
}
int nv_init_ibmnpu_devices(nv_state_t *nv)
{
return 0;
}
void nv_unregister_ibmnpu_devices(nv_state_t *nv)
{
}
NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr,
NvU64 *size, void **device)
{
return NV_ERR_NOT_SUPPORTED;
}
NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv,
NvBool *mode)
{
return NV_ERR_NOT_SUPPORTED;
}
void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv)
{
}
/* -1 indicates no IBMNPU device. */
int nv_get_ibmnpu_chip_id(nv_state_t *nv)
{
return -1;
}
void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 virtual, NvU64 size)
{
}
void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv)
{
}
#endif

View File

@@ -0,0 +1,80 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_IBMNPU_H_
#define _NV_IBMNPU_H_
#if defined(NVCPU_PPC64LE)
#include "ibmnpu_linux.h"
#define NV_MAX_ATTACHED_IBMNPUS 6
/* Parameters of the GPU NUMA memory aperture reported by IBMNPU firmware. */
typedef struct nv_npu_numa_info
{
/*
 * 47-bit NVIDIA 'system physical address': the hypervisor real 56-bit
 * address with NVLink address compression scheme applied.
 */
NvU64 compr_sys_phys_addr;
/*
 * 56-bit NVIDIA 'guest physical address'/host virtual address. On
 * unvirtualized systems, applying the NVLink address compression scheme
 * to this address should be the same as compr_sys_phys_addr.
 */
NvU64 guest_phys_addr;
/*
 * L1 data cache block size on P9 - needed to manually flush/invalidate the
 * NUMA region from the CPU caches after offlining.
 */
NvU32 l1d_cache_block_size;
} nv_npu_numa_info_t;
/* Per-GPU bookkeeping for the IBMNPU devices attached to a GPU. */
struct nv_ibmnpu_info
{
NvU8 dev_count; /* number of valid entries in devs[] */
NvU8 initialized_dev_count; /* devs[] entries that completed ibmnpu_init_device() */
struct pci_dev *devs[NV_MAX_ATTACHED_IBMNPUS]; /* attached IBMNPU PCI devices */
ibmnpu_genregs_info_t genregs; /* generation register space info (size 0 if absent) */
nv_npu_numa_info_t numa_info; /* NUMA memory aperture parameters */
};
/*
 * TODO: These parameters are specific to Volta/P9 configurations, and may
 * need to be determined dynamically in the future.
 */
/* Volta address-space width: drives the 128GB aperture alignment check. */
static const NvU32 nv_volta_addr_space_width = 37;
/* Maximum width of the compressed system physical address. */
static const NvU32 nv_volta_dma_addr_size = 47;
#endif
/* Discover attached IBMNPU devices and set up genregs/NUMA state. */
void nv_init_ibmnpu_info(nv_state_t *nv);
/* Free the state allocated by nv_init_ibmnpu_info(). */
void nv_destroy_ibmnpu_info(nv_state_t *nv);
/* Initialize each attached IBMNPU device; returns 0, or -EIO on failure. */
int nv_init_ibmnpu_devices(nv_state_t *nv);
/* Unregister every device initialized by nv_init_ibmnpu_devices(). */
void nv_unregister_ibmnpu_devices(nv_state_t *nv);
/* Chip id of the first attached IBMNPU device, or -1 if none. */
int nv_get_ibmnpu_chip_id(nv_state_t *nv);
/* Flush CPU caches for the NUMA-onlined region (stub off PPC64LE). */
void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv);
#endif

View File

@@ -0,0 +1,702 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#if defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp-abi.h>
#endif
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp.h>
#elif defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT)
#include <soc/tegra/tegra_bpmp.h>
#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)
#if defined NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT
#include <dt-bindings/interconnect/tegra_icc_id.h>
#endif
#ifdef NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT
#include <linux/platform/tegra/mc_utils.h>
#endif
//
// IMP requires information from various BPMP and MC driver functions. The
// macro below checks that all of the required functions are present.
//
#define IMP_SUPPORT_FUNCTIONS_PRESENT \
NV_IS_EXPORT_SYMBOL_PRESENT_dram_clk_to_mc_clk && \
NV_IS_EXPORT_SYMBOL_PRESENT_get_dram_num_channels && \
NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dram_types && \
(defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || \
IS_ENABLED(CONFIG_TEGRA_BPMP)) && \
defined(NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT)
//
// Also create a macro to check if all the required ICC symbols are present.
// DT endpoints are defined in dt-bindings/interconnect/tegra_icc_id.h.
//
#define ICC_SUPPORT_FUNCTIONS_PRESENT \
defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
#if IMP_SUPPORT_FUNCTIONS_PRESENT
// Cached BPMP query results: populated once by nv_imp_get_bpmp_data() and
// read later by nv_imp_get_import_data().
static struct mrq_emc_dvfs_latency_response latency_table;
static struct mrq_emc_dvfs_emchub_response emchub_table;
static struct cmd_iso_client_get_max_bw_response max_bw_table;
/*!
 * @brief Converts the MC driver dram type to RM format
 *
 * The MC driver's tegra_dram_types() function returns the dram type as an
 * enum. We convert it to an NvU32 for better ABI compatibility when stored in
 * the TEGRA_IMP_IMPORT_DATA structure, which is shared between various
 * software components.
 *
 * @param[in] dram_type Dram type (DRAM_TYPE_LPDDRxxx format).
 *
 * @returns dram type (TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDRxxx format).
 */
static inline NvU32
nv_imp_convert_dram_type_to_rm_format
(
    enum dram_types dram_type
)
{
    switch (dram_type)
    {
        // All LPDDR4 variants (any channel count, rank count, ECC or not)
        // collapse to the single RM LPDDR4 type.
        case DRAM_TYPE_LPDDR4_16CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR4_16CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR4_8CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR4_8CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR4_4CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR4_4CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR4_16CH_1RANK:
        case DRAM_TYPE_LPDDR4_16CH_2RANK:
        case DRAM_TYPE_LPDDR4_8CH_1RANK:
        case DRAM_TYPE_LPDDR4_8CH_2RANK:
        case DRAM_TYPE_LPDDR4_4CH_1RANK:
        case DRAM_TYPE_LPDDR4_4CH_2RANK:
            return TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR4;

        // Likewise, all LPDDR5 variants collapse to the RM LPDDR5 type.
        case DRAM_TYPE_LPDDR5_16CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR5_16CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR5_8CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR5_8CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR5_4CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR5_4CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR5_16CH_1RANK:
        case DRAM_TYPE_LPDDR5_16CH_2RANK:
        case DRAM_TYPE_LPDDR5_8CH_1RANK:
        case DRAM_TYPE_LPDDR5_8CH_2RANK:
        case DRAM_TYPE_LPDDR5_4CH_1RANK:
        case DRAM_TYPE_LPDDR5_4CH_2RANK:
            return TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR5;

        default:
            return TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_UNKNOWN;
    }
}
#endif // IMP_SUPPORT_FUNCTIONS_PRESENT
/*!
 * @brief Collects IMP-relevant BPMP data and saves for later
 *
 * Results are cached in the file-scope latency_table / emchub_table /
 * max_bw_table variables for later use by nv_imp_get_import_data().
 *
 * @param[in] nvl OS-specific device state
 *
 * @returns NV_OK if successful,
 *          NV_ERR_GENERIC if the BPMP API returns an error,
 *          NV_ERR_MISSING_TABLE_ENTRY if the latency table has no entries,
 *          NV_ERR_INVALID_DATA if the number of clock entries in the latency
 *          table does not match the number of entries in the emchub table, or
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available.
 */
NV_STATUS
nv_imp_get_bpmp_data
(
    nv_linux_state_t *nvl
)
{
#if IMP_SUPPORT_FUNCTIONS_PRESENT
    NV_STATUS status = NV_OK;
    int rc;
    int i;
    NvBool bApiTableInvalid = NV_FALSE;
    // Fallback dramclk (KHz) / max ISO BW (KB/sec) pairs, used when the
    // CMD_ISO_CLIENT_GET_MAX_BW query fails or returns an all-zero table.
    static const struct iso_max_bw dummy_iso_bw_pairs[] =
    { {  204000U,  1472000U },
      {  533000U,  3520000U },
      {  665000U,  4352000U },
      {  800000U,  5184000U },
      { 1066000U,  6784000U },
      { 1375000U,  8704000U },
      { 1600000U, 10112000U },
      { 1866000U, 11712000U },
      { 2133000U, 13376000U },
      { 2400000U, 15040000U },
      { 2750000U, 17152000U },
      { 3000000U, 18688000U },
      { 3200000U, 20800000U }
    };
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    struct tegra_bpmp *bpmp;
    struct tegra_bpmp_message msg;
    struct mrq_iso_client_request iso_client_request;

    bpmp = tegra_bpmp_get(nvl->dev);
    if (IS_ERR(bpmp))
    {
        // Fix: PTR_ERR() yields a long error code, not a string; printing it
        // with "%s" would have dereferenced the error value.
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: Error getting bpmp struct: %ld\n",
                  PTR_ERR(bpmp));
        return NV_ERR_GENERIC;
    }
    // Get the table of dramclk / DVFS latency pairs.
    memset(&msg, 0, sizeof(msg));
    msg.mrq = MRQ_EMC_DVFS_LATENCY;
    msg.tx.data = NULL;
    msg.tx.size = 0;
    msg.rx.data = &latency_table;
    msg.rx.size = sizeof(latency_table);
    rc = tegra_bpmp_transfer(bpmp, &msg);
#else
    // Get the table of dramclk / DVFS latency pairs.
    rc = tegra_bpmp_send_receive(MRQ_EMC_DVFS_LATENCY,
                                 NULL,
                                 0,
                                 &latency_table,
                                 sizeof(latency_table));
#endif
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DVFS_LATENCY returns error code %d\n", rc);
        status = NV_ERR_GENERIC;
        goto Cleanup;
    }
    nv_printf(NV_DBG_INFO,
              "MRQ_EMC_DVFS_LATENCY table size = %u\n",
              latency_table.num_pairs);
    if (latency_table.num_pairs == 0U)
    {
        // Fix: dropped a stray "rc" argument that had no matching conversion
        // specifier in the format string.
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DVFS_LATENCY table has no entries\n");
        status = NV_ERR_MISSING_TABLE_ENTRY;
        goto Cleanup;
    }
    // Get the table of dramclk / emchubclk pairs.
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    memset(&msg, 0, sizeof(msg));
    msg.mrq = MRQ_EMC_DVFS_EMCHUB;
    msg.tx.data = NULL;
    msg.tx.size = 0;
    msg.rx.data = &emchub_table;
    msg.rx.size = sizeof(emchub_table);
    rc = tegra_bpmp_transfer(bpmp, &msg);
#else
    rc = tegra_bpmp_send_receive(MRQ_EMC_DVFS_EMCHUB,
                                 NULL,
                                 0,
                                 &emchub_table,
                                 sizeof(emchub_table));
#endif
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DVFS_EMCHUB returns error code %d\n", rc);
        status = NV_ERR_GENERIC;
        goto Cleanup;
    }
    nv_printf(NV_DBG_INFO,
              "MRQ_EMC_DVFS_EMCHUB table size = %u\n",
              emchub_table.num_pairs);
    // Both tables are expected to describe the same set of dramclk points.
    if (latency_table.num_pairs != emchub_table.num_pairs)
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DVFS_LATENCY table size (%u) does not match MRQ_EMC_DVFS_EMCHUB table size (%u)\n",
                  latency_table.num_pairs,
                  emchub_table.num_pairs);
        status = NV_ERR_INVALID_DATA;
        goto Cleanup;
    }
    // Get the table of dramclk / max ISO BW pairs.
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    memset(&iso_client_request, 0, sizeof(iso_client_request));
    iso_client_request.cmd = CMD_ISO_CLIENT_GET_MAX_BW;
    iso_client_request.max_isobw_req.id = TEGRA_ICC_DISPLAY;
    msg.mrq = MRQ_ISO_CLIENT;
    msg.tx.data = &iso_client_request;
    msg.tx.size = sizeof(iso_client_request);
    msg.rx.data = &max_bw_table;
    msg.rx.size = sizeof(max_bw_table);
    rc = tegra_bpmp_transfer(bpmp, &msg);
#else
    // Maybe we don't need the old implementation "else" clause cases anymore.
    NV_ASSERT(NV_FALSE);
#endif
    if ((rc != 0) || (max_bw_table.num_pairs == 0U))
    {
        if (rc != 0)
        {
            nv_printf(NV_DBG_ERRORS,
                      "MRQ_ISO_CLIENT returns error code %d\n", rc);
        }
        else
        {
            nv_printf(NV_DBG_ERRORS,
                      "CMD_ISO_CLIENT_GET_MAX_BW table does not contain any entries\n");
        }
        bApiTableInvalid = NV_TRUE;
    }
    else
    {
        //
        // Check for entries with ISO BW = 0. It's possible that one entry may
        // be zero, but they should not all be zero. (On simulation, due to bug
        // 3379796, the API is currently not working; it returns 13 entries,
        // each with ISO BW = 0.)
        //
        bApiTableInvalid = NV_TRUE;
        for (i = 0; i < max_bw_table.num_pairs; i++)
        {
            if (max_bw_table.pairs[i].iso_bw != 0U)
            {
                bApiTableInvalid = NV_FALSE;
                break;
            }
        }
    }
    if (bApiTableInvalid)
    {
        //
        // If the table is not returned correctly, for now, fill in a dummy
        // table.
        //
        nv_printf(NV_DBG_ERRORS,
                  "Creating dummy CMD_ISO_CLIENT_GET_MAX_BW table\n");
        max_bw_table.num_pairs = sizeof(dummy_iso_bw_pairs) /
                                 sizeof(dummy_iso_bw_pairs[0]);
        for (i = 0; i < max_bw_table.num_pairs; i++)
        {
            max_bw_table.pairs[i].freq = dummy_iso_bw_pairs[i].freq;
            max_bw_table.pairs[i].iso_bw = dummy_iso_bw_pairs[i].iso_bw;
        }
    }
    nv_printf(NV_DBG_INFO,
              "CMD_ISO_CLIENT_GET_MAX_BW table size = %u\n",
              max_bw_table.num_pairs);
Cleanup:
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    // Drop the reference taken by tegra_bpmp_get() above.
    tegra_bpmp_put(bpmp);
#endif
    return status;
#else // IMP_SUPPORT_FUNCTIONS_PRESENT
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
 * @brief Returns IMP-relevant data collected from other modules
 *
 * nv_imp_get_bpmp_data() must have populated the cached BPMP tables before
 * this is called.
 *
 * @param[out] tegra_imp_import_data Structure to receive the data
 *
 * @returns NV_OK if successful,
 *          NV_ERR_BUFFER_TOO_SMALL if the array in TEGRA_IMP_IMPORT_DATA is
 *          too small,
 *          NV_ERR_INVALID_DATA if the latency table has different mclk
 *          frequencies, compared with the emchub table, or
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available.
 */
NV_STATUS NV_API_CALL
nv_imp_get_import_data
(
    TEGRA_IMP_IMPORT_DATA *tegra_imp_import_data
)
{
#if IMP_SUPPORT_FUNCTIONS_PRESENT
    NvU32 i;
    NvU32 bwTableIndex = 0U;
    NvU32 dram_clk_freq_khz;
    enum dram_types dram_type;

    tegra_imp_import_data->num_dram_clk_entries = latency_table.num_pairs;
    if (ARRAY_SIZE(tegra_imp_import_data->dram_clk_instance) <
        latency_table.num_pairs)
    {
        // Fix: both counts are unsigned (NvU32 / size_t); "%d" was the wrong
        // conversion specifier for them.
        nv_printf(NV_DBG_ERRORS,
                  "ERROR: TEGRA_IMP_IMPORT_DATA struct needs to have at least "
                  "%u dram_clk_instance entries, but only %u are allocated\n",
                  latency_table.num_pairs,
                  (NvU32) ARRAY_SIZE(tegra_imp_import_data->dram_clk_instance));
        return NV_ERR_BUFFER_TOO_SMALL;
    }
    //
    // Copy data that we collected earlier in the BPMP tables into the caller's
    // IMP import structure.
    //
    for (i = 0U; i < latency_table.num_pairs; i++)
    {
        dram_clk_freq_khz = latency_table.pairs[i].freq;
        //
        // For each dramclk frequency, we get some information from the EMCHUB
        // table and some information from the LATENCY table. We expect both
        // tables to have entries for the same dramclk frequencies.
        //
        if (dram_clk_freq_khz != emchub_table.pairs[i].freq)
        {
            // Fix: indices and frequencies are unsigned; use "%u" (matches
            // the other messages in this function).
            nv_printf(NV_DBG_ERRORS,
                      "MRQ_EMC_DVFS_LATENCY index #%u dramclk freq (%u KHz) does not match "
                      "MRQ_EMC_DVFS_EMCHUB index #%u dramclk freq (%u KHz)\n",
                      i, latency_table.pairs[i].freq,
                      i, emchub_table.pairs[i].freq);
            return NV_ERR_INVALID_DATA;
        }
        // Copy a few values to the caller's table.
        tegra_imp_import_data->dram_clk_instance[i].dram_clk_freq_khz =
            dram_clk_freq_khz;
        tegra_imp_import_data->dram_clk_instance[i].switch_latency_ns =
            latency_table.pairs[i].latency;
        tegra_imp_import_data->dram_clk_instance[i].mc_clk_khz =
            dram_clk_to_mc_clk(dram_clk_freq_khz / 1000U) * 1000U;
        // MC hubclk is 1/2 of scf clk, which is the same as EMCHUB clk.
        tegra_imp_import_data->dram_clk_instance[i].mchub_clk_khz =
            emchub_table.pairs[i].hub_freq / 2U;
        //
        // The ISO BW table may have more entries then the number of dramclk
        // frequencies supported on current chip (i.e., more entries than we
        // have in the EMCHUB and LATENCY tables). For each dramclk entry that
        // we are filling out, search through the ISO BW table to find the
        // largest dramclk less than or equal to the dramclk frequency for
        // index "i", and use that ISO BW entry. (We assume all tables have
        // their entries in order of increasing dramclk frequency.)
        //
        // Note: Some of the dramclk frequencies in the ISO BW table have been
        // observed to be "rounded down" (e.g., 665000 KHz instead of 665600
        // KHz).
        //
        while ((bwTableIndex + 1U < max_bw_table.num_pairs) &&
               (dram_clk_freq_khz >= max_bw_table.pairs[bwTableIndex + 1U].freq))
        {
            nv_printf(NV_DBG_INFO,
                      "Max ISO BW table: index %u, dramclk = %u KHz, max ISO BW = %u KB/sec\n",
                      bwTableIndex,
                      max_bw_table.pairs[bwTableIndex].freq,
                      max_bw_table.pairs[bwTableIndex].iso_bw);
            bwTableIndex++;
        }
        if (dram_clk_freq_khz >= max_bw_table.pairs[bwTableIndex].freq)
        {
            nv_printf(NV_DBG_INFO,
                      "For dramclk = %u KHz, setting max ISO BW = %u KB/sec\n",
                      dram_clk_freq_khz,
                      max_bw_table.pairs[bwTableIndex].iso_bw);
            tegra_imp_import_data->dram_clk_instance[i].max_iso_bw_kbps =
                max_bw_table.pairs[bwTableIndex].iso_bw;
        }
        else
        {
            //
            // Something went wrong. Maybe the ISO BW table doesn't have any
            // entries with dramclk frequency as small as the frequency in the
            // EMCHUB and LATENCY tables, or maybe the entries are out of
            // order.
            //
            nv_printf(NV_DBG_ERRORS,
                      "Couldn't get max ISO BW for dramclk = %u KHz\n",
                      dram_clk_freq_khz);
            return NV_ERR_INVALID_DATA;
        }
    }
    dram_type = tegra_dram_types();
    tegra_imp_import_data->dram_type =
        nv_imp_convert_dram_type_to_rm_format(dram_type);
    tegra_imp_import_data->num_dram_channels = get_dram_num_channels();

    // Record the overall maximum possible ISO BW.  Tables are sorted by
    // increasing dramclk, so the last entry has the highest BW.
    i = latency_table.num_pairs - 1U;
    tegra_imp_import_data->max_iso_bw_kbps =
        tegra_imp_import_data->dram_clk_instance[i].max_iso_bw_kbps;
    return NV_OK;
#else // IMP_SUPPORT_FUNCTIONS_PRESENT
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
 * @brief Tells BPMP whether or not RFL is valid
 *
 * Display HW generates an ok_to_switch signal which asserts when mempool
 * occupancy is high enough to be able to turn off memory long enough to
 * execute a dramclk frequency switch without underflowing display output.
 * ok_to_switch drives the RFL ("request for latency") signal in the memory
 * unit, and the switch sequencer waits for this signal to go active before
 * starting a dramclk switch. However, if the signal is not valid (e.g., if
 * display HW or SW has not been initialized yet), the switch sequencer ignores
 * the signal. This API tells BPMP whether or not the signal is valid.
 *
 * @param[in] nv Per GPU Linux state
 * @param[in] bEnable True if RFL will be valid; false if invalid
 *
 * @returns NV_OK if successful,
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
 *          NV_ERR_GENERIC if some other kind of error occurred.
 */
NV_STATUS NV_API_CALL
nv_imp_enable_disable_rfl
(
    nv_state_t *nv,
    NvBool bEnable
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
#if IMP_SUPPORT_FUNCTIONS_PRESENT
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct tegra_bpmp *bpmp = tegra_bpmp_get(nvl->dev);
    struct tegra_bpmp_message msg;
    struct mrq_emc_disp_rfl_request emc_disp_rfl_request;
    int rc;

    // Fix: the original never checked tegra_bpmp_get() for failure before
    // passing the result to tegra_bpmp_transfer() (it can return an ERR_PTR,
    // as handled in nv_imp_get_bpmp_data()).
    if (IS_ERR(bpmp))
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: Error getting bpmp struct: %ld\n",
                  PTR_ERR(bpmp));
        return NV_ERR_GENERIC;
    }
    memset(&emc_disp_rfl_request, 0, sizeof(emc_disp_rfl_request));
    emc_disp_rfl_request.mode = bEnable ? EMC_DISP_RFL_MODE_ENABLED :
                                          EMC_DISP_RFL_MODE_DISABLED;
    // Zero the message so unused fields are deterministic (matches the other
    // MRQ call sites in this file).
    memset(&msg, 0, sizeof(msg));
    msg.mrq = MRQ_EMC_DISP_RFL;
    msg.tx.data = &emc_disp_rfl_request;
    msg.tx.size = sizeof(emc_disp_rfl_request);
    msg.rx.data = NULL;
    msg.rx.size = 0;
    rc = tegra_bpmp_transfer(bpmp, &msg);

    // Fix: drop the reference taken by tegra_bpmp_get(); it was previously
    // leaked on every call (nv_imp_get_bpmp_data() pairs get with put).
    tegra_bpmp_put(bpmp);

    if (rc == 0)
    {
        nv_printf(NV_DBG_INFO,
                  "\"Wait for RFL\" is %s via MRQ_EMC_DISP_RFL\n",
                  bEnable ? "enabled" : "disabled");
        status = NV_OK;
    }
    else
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DISP_RFL failed to %s \"Wait for RFL\" (error code = %d)\n",
                  bEnable ? "enable" : "disable",
                  rc);
        status = NV_ERR_GENERIC;
    }
#else
    // Maybe we don't need the old implementation "else" clause cases anymore.
    NV_ASSERT(NV_FALSE);
#endif
#endif
    return status;
}
/*!
 * @brief Obtains a handle for the display data path
 *
 * If a handle is obtained successfully, it is not returned to the caller; it
 * is saved for later use by subsequent nv_imp_icc_set_bw calls.
 * nv_imp_icc_get must be called prior to calling nv_imp_icc_set_bw.
 *
 * @param[out] nv Per GPU Linux state
 *
 * @returns NV_OK if successful,
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
 *          NV_ERR_GENERIC if some other error occurred.
 */
NV_STATUS NV_API_CALL
nv_imp_icc_get
(
    nv_state_t *nv
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status = NV_OK;
#if defined(NV_ICC_GET_PRESENT)
    struct device_node *np;

    nvl->nv_imp_icc_path = NULL;
    // Check if ICC is present in the device tree, and enabled.
    np = of_find_node_by_path("/icc");
    if (np != NULL)
    {
        if (of_device_is_available(np))
        {
            // Get the ICC data path.
            nvl->nv_imp_icc_path =
                icc_get(nvl->dev, TEGRA_ICC_DISPLAY, TEGRA_ICC_PRIMARY);
        }
        of_node_put(np);
    }
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: icc_get() not present\n");
    return NV_ERR_NOT_SUPPORTED;
#endif
    if (nvl->nv_imp_icc_path == NULL)
    {
        // ICC is absent or disabled in the device tree; not a hard error.
        nv_printf(NV_DBG_INFO, "NVRM: icc_get disabled\n");
        status = NV_ERR_NOT_SUPPORTED;
    }
    else if (IS_ERR(nvl->nv_imp_icc_path))
    {
        // Fix: PTR_ERR() yields a long error code, not a string; printing it
        // with "%s" would have dereferenced the error value.
        nv_printf(NV_DBG_ERRORS, "NVRM: invalid path = %ld\n",
                  PTR_ERR(nvl->nv_imp_icc_path));
        nvl->nv_imp_icc_path = NULL;
        status = NV_ERR_GENERIC;
    }
    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
/*!
 * @brief Releases the handle obtained by nv_imp_icc_get
 *
 * Safe to call even if nv_imp_icc_get failed or was never able to obtain a
 * path (nv_imp_icc_path is NULL in that case).  Always clears the cached
 * path so later nv_imp_icc_set_bw calls become no-ops.
 *
 * @param[in] nv Per GPU Linux state
 */
void
nv_imp_icc_put
(
    nv_state_t *nv
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
#if defined(NV_ICC_PUT_PRESENT)
    // Only release a path that was actually obtained.
    if (nvl->nv_imp_icc_path != NULL)
    {
        icc_put(nvl->nv_imp_icc_path);
    }
#else
    nv_printf(NV_DBG_ERRORS, "icc_put() not present\n");
#endif
    // Clear the cached handle regardless of whether icc_put() is available.
    nvl->nv_imp_icc_path = NULL;
#endif
}
/*!
 * @brief Allocates a specified amount of ISO memory bandwidth for display
 *
 * floor_bw_kbps is the minimum required (i.e., floor) dramclk frequency
 * multiplied by the width of the pipe over which the display data will travel.
 * (It is understood that the bandwidth calculated by multiplying the clock
 * frequency by the pipe width will not be realistically achievable, due to
 * overhead in the memory subsystem. ICC will not actually use the bandwidth
 * value, except to reverse the calculation to get the required dramclk
 * frequency.)
 *
 * nv_imp_icc_get must be called prior to calling this function.
 *
 * @param[in] nv Per GPU Linux state
 * @param[in] avg_bw_kbps Amount of ISO memory bandwidth requested
 * @param[in] floor_bw_kbps Min required dramclk freq * pipe width
 *
 * @returns NV_OK if successful,
 *          NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too
 *          high, and bandwidth cannot be allocated,
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
 *          NV_ERR_GENERIC if some other kind of error occurred.
 */
NV_STATUS NV_API_CALL
nv_imp_icc_set_bw
(
    nv_state_t *nv,
    NvU32 avg_bw_kbps,
    NvU32 floor_bw_kbps
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int ret;
    //
    // avg_bw_kbps can be either ISO bw request or NISO bw request.
    // Use floor_bw_kbps to make floor requests.
    //
#if defined(NV_ICC_SET_BW_PRESENT)
    //
    // nv_imp_icc_path will be NULL on AV + L systems because ICC is disabled.
    // In this case, skip the allocation call, and just return a success
    // status.
    //
    if (nvl->nv_imp_icc_path == NULL)
    {
        return NV_OK;
    }
    ret = icc_set_bw(nvl->nv_imp_icc_path, avg_bw_kbps, floor_bw_kbps);
#else
    nv_printf(NV_DBG_ERRORS, "icc_set_bw() not present\n");
    return NV_ERR_NOT_SUPPORTED;
#endif
    // A negative return value indicates an error; -ENOMEM specifically means
    // the requested bandwidth could not be allocated.
    if (ret >= 0)
    {
        return NV_OK;
    }
    return (ret == -ENOMEM) ? NV_ERR_INSUFFICIENT_RESOURCES : NV_ERR_GENERIC;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}

View File

@@ -0,0 +1,158 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "dce_rm_client_ipc.h"
#if defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT)
#include <linux/platform/tegra/dce/dce-client-ipc.h>
#if (NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dce_register_ipc_client && \
NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dce_client_ipc_send_recv && \
NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dce_unregister_ipc_client)
#define NV_IS_EXPORT_SYMBOLS_PRESENT_TEGRA_DCE_CLIENT 1
#else
#define NV_IS_EXPORT_SYMBOLS_PRESENT_TEGRA_DCE_CLIENT 0
#endif
#endif
#if (defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT) && \
NV_IS_EXPORT_SYMBOLS_PRESENT_TEGRA_DCE_CLIENT)
// Maps RM-level IPC interface types to the DCE driver's IPC channel types.
static const NvU32 dceClientRmIpcTypeMap[DCE_CLIENT_RM_IPC_TYPE_MAX] = {
    [DCE_CLIENT_RM_IPC_TYPE_SYNC]  = DCE_CLIENT_IPC_TYPE_CPU_RM,
    [DCE_CLIENT_RM_IPC_TYPE_EVENT] = DCE_CLIENT_IPC_TYPE_RM_EVENT,
};

// Checks that interfaceType is a known RM IPC type and that its mapped DCE
// channel type is within range.  Short-circuit evaluation guarantees the
// table is never indexed out of bounds.
static NV_STATUS validate_dce_client_ipc_interface_type(NvU32 interfaceType)
{
    NvBool bValid = (interfaceType < DCE_CLIENT_RM_IPC_TYPE_MAX) &&
                    (dceClientRmIpcTypeMap[interfaceType] < DCE_CLIENT_IPC_TYPE_MAX);

    return bValid ? NV_OK : NV_ERR_INVALID_ARGUMENT;
}
// Reverse lookup in dceClientRmIpcTypeMap: returns the RM interface type that
// maps to the given DCE client IPC type, or NV_ERR_INVALID_DATA if no entry
// matches.
NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
{
    NvU32 ifType;

    for (ifType = DCE_CLIENT_RM_IPC_TYPE_SYNC;
         ifType < DCE_CLIENT_RM_IPC_TYPE_MAX;
         ifType++)
    {
        if (dceClientRmIpcTypeMap[ifType] == clientIpcType)
        {
            return ifType;
        }
    }

    return NV_ERR_INVALID_DATA;
}
// Registers an IPC client with the DCE driver after translating the RM
// interface type to the DCE channel type.  callbackFn/usrCtx are forwarded to
// the kernel's tegra_dce_register_ipc_client(); *handle receives the client
// handle on success.
NV_STATUS nv_tegra_dce_register_ipc_client
(
    NvU32 interfaceType,
    void *usrCtx,
    nvTegraDceClientIpcCallback callbackFn,
    NvU32 *handle
)
{
    // Reject interface types that have no valid DCE mapping.
    if (validate_dce_client_ipc_interface_type(interfaceType) != NV_OK)
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    return tegra_dce_register_ipc_client(dceClientRmIpcTypeMap[interfaceType],
                                         callbackFn, usrCtx, handle);
}
// Performs a synchronous DCE IPC round trip.  The caller's buffer "msg" is
// used both as the request payload and as the reply destination, so it is
// overwritten in place on success.
NV_STATUS nv_tegra_dce_client_ipc_send_recv
(
    NvU32 clientId,
    void *msg,
    NvU32 msgLength
)
{
    struct dce_ipc_message dce_msg;

    memset(&dce_msg, 0, sizeof(dce_msg));
    dce_msg.tx.data = msg;
    dce_msg.tx.size = msgLength;
    dce_msg.rx.data = msg;
    dce_msg.rx.size = msgLength;

    return tegra_dce_client_ipc_send_recv(clientId, &dce_msg);
}
// Unregisters a DCE IPC client previously registered through
// nv_tegra_dce_register_ipc_client().  Thin wrapper: the kernel driver's
// result is returned directly to the caller.
NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
{
    return tegra_dce_unregister_ipc_client(clientId);
}
#else
//
// Fallback stubs: the DCE client IPC kernel interface (header and/or the
// exported tegra_dce_* symbols) is not available in this kernel, so every
// operation reports NV_ERR_NOT_SUPPORTED.
//
NvU32 nv_tegra_get_rm_interface_type(NvU32 clientIpcType)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS nv_tegra_dce_register_ipc_client
(
    NvU32 interfaceType,
    void *usrCtx,
    nvTegraDceClientIpcCallback callbackFn,
    NvU32 *handle
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS nv_tegra_dce_client_ipc_send_recv
(
    NvU32 clientId,
    void *msg,
    NvU32 msgLength
)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS nv_tegra_dce_unregister_ipc_client(NvU32 clientId)
{
    return NV_ERR_NOT_SUPPORTED;
}
#endif

View File

@@ -0,0 +1,335 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-kthread-q.h"
#include "nv-list-helpers.h"
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.
#ifndef WARN
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
// to implement this, because such kernels won't be supported much longer.
#define WARN(condition, format...) ({ \
    int __ret_warn_on = !!(condition); \
    if (unlikely(__ret_warn_on)) \
        printk(KERN_ERR format); \
    unlikely(__ret_warn_on); \
})
#endif

// Reports misuse of the nv_kthread_q API.  The message is tagged
// "[in interrupt]" when raised from interrupt context, and with the current
// task's name otherwise.
#define NVQ_WARN(fmt, ...) \
    do { \
        if (in_interrupt()) { \
            WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
                 ##__VA_ARGS__); \
        } \
        else { \
            WARN(1, "nv_kthread_q: task: %s: " fmt, \
                 current->comm, \
                 ##__VA_ARGS__); \
        } \
    } while (0)
// Worker thread body for one nv_kthread_q instance: consumes queued items in
// FIFO order until nv_kthread_q_stop() sets main_loop_should_exit.
static int _main_loop(void *args)
{
    nv_kthread_q_t *q = (nv_kthread_q_t *)args;
    nv_kthread_q_item_t *q_item = NULL;
    unsigned long flags;

    while (1) {
        // Normally this thread is never interrupted. However,
        // down_interruptible (instead of down) is called here,
        // in order to avoid being classified as a potentially
        // hung task, by the kernel watchdog.
        while (down_interruptible(&q->q_sem))
            NVQ_WARN("Interrupted during semaphore wait\n");

        if (atomic_read(&q->main_loop_should_exit))
            break;

        spin_lock_irqsave(&q->q_lock, flags);

        // The q_sem semaphore prevents us from getting here unless there is
        // at least one item in the list, so an empty list indicates a bug.
        if (unlikely(list_empty(&q->q_list_head))) {
            spin_unlock_irqrestore(&q->q_lock, flags);
            NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
            continue;
        }

        // Consume one item from the queue.  list_del_init (not list_del) so
        // the item's node reads as empty again, which is what
        // _raw_q_schedule() uses to decide whether the item may be re-queued.
        q_item = list_first_entry(&q->q_list_head,
                                  nv_kthread_q_item_t,
                                  q_list_node);
        list_del_init(&q_item->q_list_node);

        // The lock is dropped before invoking the callback.
        spin_unlock_irqrestore(&q->q_lock, flags);

        // Run the item
        q_item->function_to_run(q_item->function_args);

        // Make debugging a little simpler by clearing this between runs:
        q_item = NULL;
    }

    // Stay alive (yielding the CPU) until kthread_stop() is invoked on this
    // thread by nv_kthread_q_stop().
    while (!kthread_should_stop())
        schedule();

    return 0;
}
// Flushes any pending items, then stops and joins the queue's worker thread.
// Safe to call on a queue whose init failed (q_kthread is NULL) and
// idempotent with respect to main_loop_should_exit.
void nv_kthread_q_stop(nv_kthread_q_t *q)
{
    // check if queue has been properly initialized
    if (unlikely(!q->q_kthread))
        return;

    nv_kthread_q_flush(q);

    // If this assertion fires, then a caller likely either broke the API rules,
    // by adding items after calling nv_kthread_q_stop, or possibly messed up
    // with inadequate flushing of self-rescheduling q_items.
    if (unlikely(!list_empty(&q->q_list_head)))
        NVQ_WARN("list not empty after flushing\n");

    if (likely(!atomic_read(&q->main_loop_should_exit))) {
        atomic_set(&q->main_loop_should_exit, 1);

        // Wake up the kthread so that it can see that it needs to stop:
        up(&q->q_sem);

        kthread_stop(q->q_kthread);
        q->q_kthread = NULL;
    }
}
// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads
// -If the first thread's stack is resident on the preferred node, return this
// thread.
// -Otherwise, create a second thread. If its stack is resident on the
// preferred node, stop the first thread and return this one.
// -Otherwise, create a third thread. The stack allocator does not find a
// cached stack, and so falls back to vmalloc, which takes the NUMA hint into
// consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1
// Creates a kthread whose stack is (best-effort) resident on preferred_node.
// See the CONFIG_VMAP_STACK discussion above: up to three creation attempts
// are made to defeat the kernel's NUMA-unaware per-CPU stack cache; surplus
// threads are stopped before returning.
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
                                                 nv_kthread_q_t *q,
                                                 int preferred_node,
                                                 const char *q_name)
{

    unsigned i, j;
    const static unsigned attempts = 3;
    struct task_struct *thread[3];

    for (i = 0;; i++) {
        struct page *stack;

        thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);

        if (unlikely(IS_ERR(thread[i]))) {

            // Instead of failing, pick the previous thread, even if its
            // stack is not allocated on the preferred node.
            if (i > 0)
                i--;

            break;
        }

        // vmalloc is not used to allocate the stack, so simply return the
        // thread, even if its stack may not be allocated on the preferred node
        if (!is_vmalloc_addr(thread[i]->stack))
            break;

        // Ran out of attempts - return thread even if its stack may not be
        // allocated on the preferred node
        if ((i == (attempts - 1)))
            break;

        // Get the NUMA node where the first page of the stack is resident. If
        // it is the preferred node, select this thread.
        stack = vmalloc_to_page(thread[i]->stack);
        if (page_to_nid(stack) == preferred_node)
            break;
    }

    // Stop every extra thread created before the one we selected (index i).
    for (j = i; j > 0; j--)
        kthread_stop(thread[j - 1]);

    return thread[i];
}
#endif
// Initializes a queue and creates its worker thread, optionally requesting
// that the thread's stack live on preferred_node (NV_KTHREAD_NO_NODE means
// no preference).  Returns 0 on success or a negative errno; on failure
// q->q_kthread stays NULL so nv_kthread_q_stop() remains safe to call.
int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
    struct task_struct *kt;

    memset(q, 0, sizeof(*q));
    INIT_LIST_HEAD(&q->q_list_head);
    spin_lock_init(&q->q_lock);
    sema_init(&q->q_sem, 0);

    if (preferred_node == NV_KTHREAD_NO_NODE) {
        kt = kthread_create(_main_loop, q, q_name);
    }
    else {
#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1
        kt = thread_create_on_node(_main_loop, q, preferred_node, q_name);
#else
        return -ENOTSUPP;
#endif
    }

    if (IS_ERR(kt)) {
        // Leave q->q_kthread NULL, which makes error handling easier for
        // callers: nv_kthread_q_stop() can be safely called on this queue.
        return PTR_ERR(kt);
    }

    q->q_kthread = kt;
    wake_up_process(kt);

    return 0;
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
    unsigned long irq_flags;
    int scheduled = 0;

    spin_lock_irqsave(&q->q_lock, irq_flags);

    // A non-empty q_list_node means the item is already pending somewhere.
    if (list_empty(&q_item->q_list_node)) {
        list_add_tail(&q_item->q_list_node, &q->q_list_head);
        scheduled = 1;
    }

    spin_unlock_irqrestore(&q->q_lock, irq_flags);

    // Wake the worker only when something was actually enqueued, after the
    // lock has been dropped.
    if (scheduled)
        up(&q->q_sem);

    return scheduled;
}
// Prepares a q_item for scheduling: records the callback and its argument,
// and marks the item as "not currently queued".
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
                            nv_q_func_t function_to_run,
                            void *function_args)
{
    q_item->function_to_run = function_to_run;
    q_item->function_args = function_args;
    // An empty (self-linked) node is how _raw_q_schedule() recognizes an
    // item that is free to be queued.
    INIT_LIST_HEAD(&q_item->q_list_node);
}
// Returns true (non-zero) if the q_item got scheduled, false otherwise.
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
                                 nv_kthread_q_item_t *q_item)
{
    // Refuse to schedule onto a queue that is shutting down.
    if (atomic_read(&q->main_loop_should_exit)) {
        NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
                 "called with a non-alive q: 0x%p\n", q);
        return 0;
    }

    return _raw_q_schedule(q, q_item);
}
// Queue-item callback used by _raw_q_flush(): signals the completion that the
// flush marker has been reached.
static void _q_flush_function(void *args)
{
    complete((struct completion *)args);
}
// Enqueues a marker item and blocks until the worker has executed it, which
// guarantees every item queued before the marker has already run.
static void _raw_q_flush(nv_kthread_q_t *q)
{
    nv_kthread_q_item_t flush_item;
    DECLARE_COMPLETION(flush_done);

    nv_kthread_q_item_init(&flush_item, _q_flush_function, &flush_done);
    _raw_q_schedule(q, &flush_item);

    // Wait for the flush item to run. Once it has run, then all of the
    // previously queued items in front of it will have run, so that means
    // the flush is complete.
    wait_for_completion(&flush_done);
}
// Waits until all items currently pending on the queue have run.  Must not be
// called after nv_kthread_q_stop() has begun shutting the queue down.
void nv_kthread_q_flush(nv_kthread_q_t *q)
{
    int pass;

    if (atomic_read(&q->main_loop_should_exit)) {
        NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
                 "nv_kthread_q_stop. q: 0x%p\n", q);
        return;
    }

    // Two passes on purpose: a q_item that reschedules itself during the
    // first pass is still caught and drained by the second pass.
    for (pass = 0; pass < 2; pass++)
        _raw_q_flush(q);
}

View File

@@ -0,0 +1,232 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-memdbg.h"
#include "nv-linux.h"
/* track who's allocating memory and print out a list of leaked allocations at
* teardown.
*/
/* One tracked allocation; keyed by 'addr' in the red-black tree below. */
typedef struct {
    struct rb_node rb_node;   // linkage into g_nv_memdbg.rb_root
    void *addr;               // allocation address (tree key)
    NvU64 size;               // allocation size in bytes
    NvU32 line;               // source line of the allocation site
    const char *file;         // source file of the allocation site (may be NULL)
} nv_memdbg_node_t;
/* Global allocation tracker, protected by 'lock'. Allocations for which
 * a tracking node could not be allocated are only counted in the
 * 'untracked' fields instead of being inserted into the tree. */
struct
{
    struct rb_root rb_root;      // tree of nv_memdbg_node_t, ordered by addr
    NvU64 untracked_bytes;       // bytes allocated without a tracking node
    NvU64 num_untracked_allocs;  // number of allocations without a node
    nv_spinlock_t lock;          // guards all of the above
} g_nv_memdbg;
/* Initialize the global allocation tracker: empty tree, fresh lock. */
void nv_memdbg_init(void)
{
    g_nv_memdbg.rb_root = RB_ROOT;
    NV_SPIN_LOCK_INIT(&g_nv_memdbg.lock);
}
/* Convert an rb_node pointer back to its containing tracking node. */
static nv_memdbg_node_t *nv_memdbg_node_entry(struct rb_node *rb_node)
{
    return rb_entry(rb_node, nv_memdbg_node_t, rb_node);
}
/* Insert 'new' into the address-ordered tree. Caller holds g_nv_memdbg.lock.
 * Inserting a duplicate address indicates a double-add and triggers a WARN. */
static void nv_memdbg_insert_node(nv_memdbg_node_t *new)
{
    struct rb_node **link = &g_nv_memdbg.rb_root.rb_node;
    struct rb_node *parent = NULL;

    while (*link != NULL)
    {
        nv_memdbg_node_t *curr = nv_memdbg_node_entry(*link);

        WARN_ON(new->addr == curr->addr);

        parent = *link;
        link = (new->addr < curr->addr) ? &(*link)->rb_left
                                        : &(*link)->rb_right;
    }

    rb_link_node(&new->rb_node, parent, link);
    rb_insert_color(&new->rb_node, &g_nv_memdbg.rb_root);
}
/*
 * Remove and return the tracking node for 'addr'. Caller holds
 * g_nv_memdbg.lock.
 *
 * Returns NULL if 'addr' is not tracked; the caller then adjusts the
 * untracked counters instead. A lookup miss is unexpected and triggers a
 * WARN. (Previously, a miss erased whatever node the search last visited
 * — or dereferenced NULL on an empty tree — corrupting the tree; now it
 * fails safely.)
 */
static nv_memdbg_node_t *nv_memdbg_remove_node(void *addr)
{
    struct rb_node *rb_node = g_nv_memdbg.rb_root.rb_node;

    while (rb_node)
    {
        nv_memdbg_node_t *node = nv_memdbg_node_entry(rb_node);

        if (addr == node->addr)
        {
            rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
            return node;
        }
        else if (addr < node->addr)
            rb_node = rb_node->rb_left;
        else
            rb_node = rb_node->rb_right;
    }

    /* Address was never tracked: warn, but do not erase an unrelated node. */
    WARN_ON(1);
    return NULL;
}
/*
 * Record a new allocation of 'size' bytes at 'addr', attributed to
 * file:line. The tracking node is allocated outside the spinlock (with
 * GFP_ATOMIC when sleeping is not allowed).
 */
void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line)
{
    nv_memdbg_node_t *node;
    unsigned long flags;

    WARN_ON(addr == NULL);

    /* If node allocation fails, we can still update the untracked counters */
    node = kmalloc(sizeof(*node),
                   NV_MAY_SLEEP() ? NV_GFP_KERNEL : NV_GFP_ATOMIC);
    if (node)
    {
        node->addr = addr;
        node->size = size;
        node->file = file;
        node->line = line;
    }

    NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);

    if (node)
    {
        nv_memdbg_insert_node(node);
    }
    else
    {
        /* No node: fold this allocation into the aggregate counters. */
        ++g_nv_memdbg.num_untracked_allocs;
        g_nv_memdbg.untracked_bytes += size;
    }

    NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags);
}
/*
 * Record the free of the allocation at 'addr'. 'size' (when non-zero) is
 * cross-checked against the recorded allocation size; a mismatch is
 * reported and triggers a debug breakpoint.
 */
void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line)
{
    nv_memdbg_node_t *node;
    unsigned long flags;

    NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags);

    node = nv_memdbg_remove_node(addr);
    if (!node)
    {
        /* No node: this allocation was counted in the untracked totals
         * (its tracking node failed to allocate in nv_memdbg_add). */
        WARN_ON(g_nv_memdbg.num_untracked_allocs == 0);
        WARN_ON(g_nv_memdbg.untracked_bytes < size);
        --g_nv_memdbg.num_untracked_allocs;
        g_nv_memdbg.untracked_bytes -= size;
    }

    NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags);

    /* Report and free outside the spinlock. */
    if (node)
    {
        if ((size != 0) && (node->size != size))
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: size mismatch on free: %llu != %llu\n",
                size, node->size);
            if (node->file)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: allocation: 0x%p @ %s:%d\n",
                    node->addr, node->file, node->line);
            }
            else
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: allocation: 0x%p\n",
                    node->addr);
            }
            os_dbg_breakpoint();
        }

        kfree(node);
    }
}
/*
 * Driver teardown: report every allocation still in the tree (a leak),
 * then report aggregate totals including untracked allocations. Runs
 * single-threaded at module exit, so the tree is walked without the lock.
 */
void nv_memdbg_exit(void)
{
    nv_memdbg_node_t *node;
    NvU64 leaked_bytes = 0, num_leaked_allocs = 0;

    if (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: list of leaked memory allocations:\n");
    }

    /* Drain the tree, printing and freeing one node at a time. */
    while (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root))
    {
        node = nv_memdbg_node_entry(rb_first(&g_nv_memdbg.rb_root));

        leaked_bytes += node->size;
        ++num_leaked_allocs;

        if (node->file)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %llu bytes, 0x%p @ %s:%d\n",
                node->size, node->addr, node->file, node->line);
        }
        else
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %llu bytes, 0x%p\n",
                node->size, node->addr);
        }

        rb_erase(&node->rb_node, &g_nv_memdbg.rb_root);
        kfree(node);
    }

    /* If we failed to allocate a node at some point, we may have leaked memory
     * even if the tree is empty */
    if (num_leaked_allocs > 0 || g_nv_memdbg.num_untracked_allocs > 0)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: total leaked memory: %llu bytes in %llu allocations\n",
            leaked_bytes + g_nv_memdbg.untracked_bytes,
            num_leaked_allocs + g_nv_memdbg.num_untracked_allocs);

        if (g_nv_memdbg.num_untracked_allocs > 0)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: %llu bytes in %llu allocations untracked\n",
                g_nv_memdbg.untracked_bytes, g_nv_memdbg.num_untracked_allocs);
        }
    }
}

View File

@@ -0,0 +1,781 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv_speculation_barrier.h"
/*
* The 'struct vm_operations' open() callback is called by the Linux
* kernel when the parent VMA is split or copied, close() when the
* current VMA is about to be deleted.
*
* We implement these callbacks to keep track of the number of user
* mappings of system memory allocations. This was motivated by a
* subtle interaction problem between the driver and the kernel with
* respect to the bookkeeping of pages marked reserved and later
* mapped with mmap().
*
* Traditionally, the Linux kernel ignored reserved pages, such that
* when they were mapped via mmap(), the integrity of their usage
* counts depended on the reserved bit being set for as long as user
* mappings existed.
*
* Since we mark system memory pages allocated for DMA reserved and
* typically map them with mmap(), we need to ensure they remain
* reserved until the last mapping has been torn down. This worked
* correctly in most cases, but in a few, the RM API called into the
* RM to free memory before calling munmap() to unmap it.
*
* In the past, we allowed nv_free_pages() to remove the 'at' from
* the parent device's allocation list in this case, but didn't
* release the underlying pages until the last user mapping had been
* destroyed:
*
* In nvidia_vma_release(), we freed any resources associated with
* the allocation (IOMMU mappings, etc.) and cleared the
* underlying pages' reserved bits, but didn't free them. The kernel
* was expected to do this.
*
* This worked in practise, but made dangerous assumptions about the
* kernel's behavior and could fail in some cases. We now handle
* this case differently (see below).
*/
/*
 * VMA open() callback: invoked by the kernel when a VMA backed by one of
 * our allocations is split or duplicated; takes another usage reference.
 */
static void
nvidia_vma_open(struct vm_area_struct *vma)
{
    nv_alloc_t *at = NV_VMA_PRIVATE(vma);

    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    if (at == NULL)
        return;

    NV_ATOMIC_INC(at->usage_count);
    NV_PRINT_AT(NV_DBG_MEMINFO, at);
}
/*
* (see above for additional information)
*
* If the 'at' usage count drops to zero with the updated logic, the
* the allocation is recorded in the free list of the private
* data associated with the file pointer; nvidia_close() uses this
* list to perform deferred free operations when the parent file
* descriptor is closed. This will typically happen when the process
* exits.
*
* Since this is technically a workaround to handle possible fallout
* from misbehaving clients, we additionally print a warning.
*/
static void
nvidia_vma_release(struct vm_area_struct *vma)
{
    nv_alloc_t *at = NV_VMA_PRIVATE(vma);
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
    // Rate-limits the warning below across all VMAs for the driver's lifetime.
    static int count = 0;

    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    // nv_alloc_release() result is truthy when the client freed the
    // allocation before unmapping it (the "late unmap" case described above).
    if (at != NULL && nv_alloc_release(nvlfp, at))
    {
        // Warn only when the unmapper is the allocating process; the count++
        // side effect caps how many of these messages are ever printed.
        if ((at->pid == os_get_current_process()) &&
            (count++ < NV_MAX_RECURRING_WARNING_MESSAGES))
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: late unmap, comm: %s, 0x%p\n",
                __FUNCTION__, current->comm, at);
        }
    }
}
/*
 * VMA access() callback: allows ptrace()-style debugger access to
 * mappings that have no struct pages behind them. Copies at most one
 * page worth of data per call. Returns the number of bytes copied, or a
 * negative errno.
 */
static int
nvidia_vma_access(
    struct vm_area_struct *vma,
    unsigned long addr,
    void *buffer,
    int length,
    int write
)
{
    nv_alloc_t *at = NULL;
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
    nv_state_t *nv = NV_STATE_PTR(nvlfp->nvptr);
    NvU32 pageIndex, pageOffset;
    void *kernel_mapping;
    const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context;
    NvU64 offset;

    pageIndex = ((addr - vma->vm_start) >> PAGE_SHIFT);
    pageOffset = (addr & ~PAGE_MASK);

    // Only mappings previously validated with the RM may be accessed.
    if (!mmap_context->valid)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n");
        return -EINVAL;
    }

    offset = mmap_context->mmap_start;

    if (nv->flags & NV_FLAG_CONTROL)
    {
        // Control node: system memory; access through the allocation's
        // existing kernel virtual mapping.
        at = NV_VMA_PRIVATE(vma);

        /*
         * at can be NULL for peer IO mem.
         */
        if (!at)
            return -EINVAL;

        if (pageIndex >= at->num_pages)
            return -EINVAL;

        /*
         * For PPC64LE build, nv_array_index_no_speculate() is not defined
         * therefore call nv_speculation_barrier().
         * When this definition is added, this platform check should be removed.
         */
#if !defined(NVCPU_PPC64LE)
        // Clamp the bounds-checked index against speculative OOB use.
        pageIndex = nv_array_index_no_speculate(pageIndex, at->num_pages);
#else
        nv_speculation_barrier();
#endif

        kernel_mapping = (void *)(at->page_table[pageIndex]->virt_addr + pageOffset);
    }
    else if (IS_FB_OFFSET(nv, offset, length))
    {
        // GPU node, framebuffer range: create a transient uncached kernel
        // mapping of the containing page (torn down below).
        addr = (offset & PAGE_MASK);
        kernel_mapping = os_map_kernel_space(addr, PAGE_SIZE, NV_MEMORY_UNCACHED);
        if (kernel_mapping == NULL)
            return -ENOMEM;
        kernel_mapping = ((char *)kernel_mapping + pageOffset);
    }
    else
        return -EINVAL;

    // Never copy past the end of the page containing 'addr'.
    length = NV_MIN(length, (int)(PAGE_SIZE - pageOffset));

    if (write)
        memcpy(kernel_mapping, buffer, length);
    else
        memcpy(buffer, kernel_mapping, length);

    // at == NULL implies the transient FB mapping above must be unmapped.
    if (at == NULL)
    {
        kernel_mapping = ((char *)kernel_mapping - pageOffset);
        os_unmap_kernel_space(kernel_mapping, PAGE_SIZE);
    }

    return length;
}
/*
 * Page-fault handler for GPU device-node mappings. Mappings may have been
 * revoked (see nv_revoke_gpu_mappings_locked()); on fault we reinstate
 * the entire VMA's PFN range, first scheduling a GPU wakeup callback if
 * the device is not currently safe to mmap.
 */
static vm_fault_t nvidia_fault(
#if !defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    struct vm_area_struct *vma,
#endif
    struct vm_fault *vmf
)
{
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
    struct vm_area_struct *vma = vmf->vma;
#endif
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma));
    nv_linux_state_t *nvl = nvlfp->nvptr;
    nv_state_t *nv = NV_STATE_PTR(nvl);

    vm_fault_t ret = VM_FAULT_NOPAGE;

    NvU64 page;
    NvU64 num_pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT;
    NvU64 pfn_start =
        (nvlfp->mmap_context.mmap_start >> PAGE_SHIFT) + vma->vm_pgoff;

    // Mapping revocation is only supported for GPU mappings.
    if (NV_IS_CTL_DEVICE(nv))
    {
        return VM_FAULT_SIGBUS;
    }

    // Wake up GPU and reinstate mappings only if we are not in S3/S4 entry
    if (!down_read_trylock(&nv_system_pm_lock))
    {
        // NOPAGE retries the fault once the PM transition completes.
        return VM_FAULT_NOPAGE;
    }

    down(&nvl->mmap_lock);

    // Wake up the GPU if it is not currently safe to mmap.
    if (!nvl->safe_to_mmap)
    {
        NV_STATUS status;

        if (!nvl->gpu_wakeup_callback_needed)
        {
            // GPU wakeup callback already scheduled.
            up(&nvl->mmap_lock);
            up_read(&nv_system_pm_lock);
            return VM_FAULT_NOPAGE;
        }

        /*
         * GPU wakeup cannot be completed directly in the fault handler due to the
         * inability to take the GPU lock while mmap_lock is held.
         */
        status = rm_schedule_gpu_wakeup(nvl->sp[NV_DEV_STACK_GPU_WAKEUP], nv);
        if (status != NV_OK)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: rm_schedule_gpu_wakeup failed: %x\n", status);
            up(&nvl->mmap_lock);
            up_read(&nv_system_pm_lock);
            return VM_FAULT_SIGBUS;
        }

        // Ensure that we do not schedule duplicate GPU wakeup callbacks.
        nvl->gpu_wakeup_callback_needed = NV_FALSE;

        up(&nvl->mmap_lock);
        up_read(&nv_system_pm_lock);
        return VM_FAULT_NOPAGE;
    }

    // Safe to mmap, map all pages in this VMA.
    for (page = 0; page < num_pages; page++)
    {
        NvU64 virt_addr = vma->vm_start + (page << PAGE_SHIFT);
        NvU64 pfn = pfn_start + page;

        ret = nv_insert_pfn(vma, virt_addr, pfn,
                            nvlfp->mmap_context.remap_prot_extra);
        if (ret != VM_FAULT_NOPAGE)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: nv_insert_pfn failed: %x\n", ret);
            break;
        }
        // At least one mapping is live again.
        nvl->all_mappings_revoked = NV_FALSE;
    }

    up(&nvl->mmap_lock);
    up_read(&nv_system_pm_lock);
    return ret;
}
// VM operations installed on every VMA created by nvidia_mmap_helper().
static struct vm_operations_struct nv_vm_ops = {
    .open   = nvidia_vma_open,     // VMA split/copy: take a usage reference
    .close  = nvidia_vma_release,  // VMA teardown: drop the usage reference
    .fault  = nvidia_fault,        // reinstate revoked GPU mappings
    .access = nvidia_vma_access,   // ptrace()-style access to the mapping
};
/*
 * Translate a driver cache type into page-protection bits in *prot for
 * the given memory type. 'prot' may be NULL to merely probe for support.
 *
 * Returns 0 on success, 1 if the cache type is unsupported for this
 * memory type (no message is printed for the WC-unsupported case; see
 * the comment inside).
 */
int nv_encode_caching(
    pgprot_t *prot,
    NvU32 cache_type,
    nv_memory_type_t memory_type
)
{
    pgprot_t tmp;

    // Probe-only callers pass NULL; encode into a throwaway value.
    if (prot == NULL)
    {
        tmp = __pgprot(0);
        prot = &tmp;
    }

    switch (cache_type)
    {
        case NV_MEMORY_UNCACHED_WEAK:
#if defined(NV_PGPROT_UNCACHED_WEAK)
            *prot = NV_PGPROT_UNCACHED_WEAK(*prot);
            break;
#endif
            // When UC- is unavailable, fall through to plain uncached.
        case NV_MEMORY_UNCACHED:
            *prot = (memory_type == NV_MEMORY_TYPE_SYSTEM) ?
                    NV_PGPROT_UNCACHED(*prot) :
                    NV_PGPROT_UNCACHED_DEVICE(*prot);
            break;
#if defined(NV_PGPROT_WRITE_COMBINED) && \
    defined(NV_PGPROT_WRITE_COMBINED_DEVICE)
        case NV_MEMORY_DEFAULT:
        case NV_MEMORY_WRITECOMBINED:
            if (NV_ALLOW_WRITE_COMBINING(memory_type))
            {
                *prot = (memory_type == NV_MEMORY_TYPE_FRAMEBUFFER) ?
                        NV_PGPROT_WRITE_COMBINED_DEVICE(*prot) :
                        NV_PGPROT_WRITE_COMBINED(*prot);
                break;
            }

            /*
             * If WC support is unavailable, we need to return an error
             * code to the caller, but need not print a warning.
             *
             * For frame buffer memory, callers are expected to use the
             * UC- memory type if we report WC as unsupported, which
             * translates to the effective memory type WC if a WC MTRR
             * exists or else UC.
             */
            return 1;
#endif
        case NV_MEMORY_CACHED:
            if (NV_ALLOW_CACHING(memory_type))
                break;
            // Intentional fallthrough.
        default:
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: cache type %d not supported for memory type %d!\n",
                cache_type, memory_type);
            return 1;
    }
    return 0;
}
/*
 * Map a peer IO allocation into 'vma'. The allocation must be physically
 * contiguous, so a single io-remap of the whole range suffices.
 * Returns 0 on success or a negative errno.
 */
static int nvidia_mmap_peer_io(
    struct vm_area_struct *vma,
    nv_alloc_t *at,
    NvU64 page_index,
    NvU64 pages
)
{
    BUG_ON(!at->flags.contig);

    return nv_io_remap_page_range(vma,
                                  at->page_table[page_index]->phys_addr,
                                  pages * PAGE_SIZE,
                                  0);
}
/*
 * Map an RM system-memory allocation into 'vma' one page at a time.
 * Takes a usage reference on 'at' that is dropped by nvidia_vma_release()
 * (or below on failure). Returns 0 on success or -EAGAIN.
 */
int static nvidia_mmap_sysmem(
    struct vm_area_struct *vma,
    nv_alloc_t *at,
    NvU64 page_index,
    NvU64 pages
)
{
    NvU64 j;
    int ret = 0;
    unsigned long start = 0;

    // Reference dropped in nvidia_vma_release(), or below if mapping fails.
    NV_ATOMIC_INC(at->usage_count);

    start = vma->vm_start;
    for (j = page_index; j < (page_index + pages); j++)
    {
        /*
         * For PPC64LE build, nv_array_index_no_speculate() is not defined
         * therefore call nv_speculation_barrier().
         * When this definition is added, this platform check should be removed.
         */
#if !defined(NVCPU_PPC64LE)
        // Clamp the bounds-checked index against speculative OOB use.
        j = nv_array_index_no_speculate(j, (page_index + pages));
#else
        nv_speculation_barrier();
#endif

#if defined(NV_VGPU_KVM_BUILD)
        if (at->flags.guest)
        {
            // Guest allocations are remapped by physical address.
            ret = nv_remap_page_range(vma, start, at->page_table[j]->phys_addr,
                    PAGE_SIZE, vma->vm_page_prot);
        }
        else
#endif
        {
            vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot, 0);
            ret = vm_insert_page(vma, start,
                    NV_GET_PAGE_STRUCT(at->page_table[j]->phys_addr));
        }
        if (ret)
        {
            // Undo the reference taken above before failing.
            NV_ATOMIC_DEC(at->usage_count);
            return -EAGAIN;
        }
        start += PAGE_SIZE;
    }

    return ret;
}
/*
 * Map coherent (NUMA-onlined) memory into 'vma' using vm_insert_page per
 * page. The mmap context must describe at least as many pages as the VMA
 * covers. Returns 0 on success, -EINVAL on a short page array, or
 * -EAGAIN if a page insertion fails.
 */
static int nvidia_mmap_numa(
    struct vm_area_struct *vma,
    const nv_alloc_mapping_context_t *mmap_context)
{
    NvU64 start;
    unsigned int pages;
    NvU64 i;

    pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT;

    if (mmap_context->num_pages < pages)
    {
        return -EINVAL;
    }

    // Needed for the linux kernel for mapping compound pages
    vma->vm_flags |= VM_MIXEDMAP;

    /*
     * Index page_array[] directly inside the loop body. The previous loop
     * fetched page_array[i + 1] in its increment expression, reading one
     * element past the last page used (out of bounds when
     * num_pages == pages), and read page_array[0] before any bounds test.
     */
    for (i = 0, start = vma->vm_start; i < pages; i++, start += PAGE_SIZE)
    {
        NvU64 addr = mmap_context->page_array[i];

        if (vm_insert_page(vma, start, NV_GET_PAGE_STRUCT(addr)) != 0)
        {
            return -EAGAIN;
        }
    }

    return 0;
}
/*
 * Establish the user mapping described by the validated mmap context on
 * 'nvlfp'. GPU device nodes (nvidia#) map BAR memory (registers or
 * framebuffer); the control node (nvidiactl) maps RM-allocated system
 * memory or peer IO memory.
 *
 * Returns 0 on success. Failure paths return either a negative errno
 * (mmap-layer errors) or an NV_STATUS code (lost GPU, bad arguments);
 * callers treat any non-zero value as failure.
 */
int nvidia_mmap_helper(
    nv_state_t *nv,
    nv_linux_file_private_t *nvlfp,
    nvidia_stack_t *sp,
    struct vm_area_struct *vma,
    void *vm_priv
)
{
    NvU32 prot = 0;
    int ret;
    const nv_alloc_mapping_context_t *mmap_context;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status;

    /*
     * Validate nvlfp before using it: previously &nvlfp->mmap_context was
     * computed ahead of this check, deriving a member address from a
     * potentially NULL pointer.
     */
    if (nvlfp == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    mmap_context = &nvlfp->mmap_context;

    /*
     * If mmap context is not valid on this file descriptor, this mapping wasn't
     * previously validated with the RM so it must be rejected.
     */
    if (!mmap_context->valid)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap\n");
        return -EINVAL;
    }

    NV_PRINT_VMA(NV_DBG_MEMINFO, vma);

    status = nv_check_gpu_state(nv);
    if (status != NV_OK)
    {
        NV_DEV_PRINTF(NV_DBG_INFO, nv,
            "GPU is lost, skipping nvidia_mmap_helper\n");
        return status;
    }

    NV_VMA_PRIVATE(vma) = vm_priv;

    prot = mmap_context->prot;

    /*
     * Nvidia device node(nvidia#) maps device's BAR memory,
     * Nvidia control node(nvidiactrl) maps system memory.
     */
    if (!NV_IS_CTL_DEVICE(nv))
    {
        NvU32 remap_prot_extra = mmap_context->remap_prot_extra;
        NvU64 mmap_start = mmap_context->mmap_start;
        NvU64 mmap_length = mmap_context->mmap_size;
        NvU64 access_start = mmap_context->access_start;
        NvU64 access_len = mmap_context->access_size;

        // Register ranges are always mapped uncached.
        if (IS_REG_OFFSET(nv, access_start, access_len))
        {
            if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,
                                  NV_MEMORY_TYPE_REGISTERS))
            {
                return -ENXIO;
            }
        }
        else if (IS_FB_OFFSET(nv, access_start, access_len))
        {
            if (IS_UD_OFFSET(nv, access_start, access_len))
            {
                if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED,
                                      NV_MEMORY_TYPE_FRAMEBUFFER))
                {
                    return -ENXIO;
                }
            }
            else
            {
                // Try the requested caching first; fall back to UC- when it
                // is unsupported (see nv_encode_caching()).
                if (nv_encode_caching(&vma->vm_page_prot,
                        rm_disable_iomap_wc() ? NV_MEMORY_UNCACHED : mmap_context->caching,
                        NV_MEMORY_TYPE_FRAMEBUFFER))
                {
                    if (nv_encode_caching(&vma->vm_page_prot,
                            NV_MEMORY_UNCACHED_WEAK, NV_MEMORY_TYPE_FRAMEBUFFER))
                    {
                        return -ENXIO;
                    }
                }
            }
        }

        down(&nvl->mmap_lock);
        if (nvl->safe_to_mmap)
        {
            nvl->all_mappings_revoked = NV_FALSE;

            //
            // This path is similar to the sysmem mapping code.
            // TODO: Refactor is needed as part of bug#2001704.
            // Use pfn_valid to determine whether the physical address has
            // backing struct page. This is used to isolate P8 from P9.
            //
            if ((nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) &&
                !IS_REG_OFFSET(nv, access_start, access_len) &&
                (pfn_valid(PFN_DOWN(mmap_start))))
            {
                ret = nvidia_mmap_numa(vma, mmap_context);
                if (ret)
                {
                    up(&nvl->mmap_lock);
                    return ret;
                }
            }
            else
            {
                if (nv_io_remap_page_range(vma, mmap_start, mmap_length,
                        remap_prot_extra) != 0)
                {
                    up(&nvl->mmap_lock);
                    return -EAGAIN;
                }
            }
        }
        // When !safe_to_mmap, PTEs are populated later by nvidia_fault().
        up(&nvl->mmap_lock);

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND;
    }
    else
    {
        nv_alloc_t *at;
        NvU64 page_index;
        NvU64 pages;
        NvU64 mmap_size;

        at = (nv_alloc_t *)mmap_context->alloc;
        page_index = mmap_context->page_index;
        mmap_size = NV_VMA_SIZE(vma);
        pages = mmap_size >> PAGE_SHIFT;

        // Reject requests extending past the end of the allocation.
        if ((page_index + pages) > at->num_pages)
        {
            return -ERANGE;
        }

        /*
         * Callers that pass in non-NULL VMA private data must never reach this
         * code. They should be mapping on a non-control node.
         */
        BUG_ON(NV_VMA_PRIVATE(vma));

        if (at->flags.peer_io)
        {
            if (nv_encode_caching(&vma->vm_page_prot,
                                  at->cache_type,
                                  NV_MEMORY_TYPE_DEVICE_MMIO))
            {
                return -ENXIO;
            }

            /*
             * There is no need to keep 'peer IO at' alive till vma_release like
             * 'sysmem at' because there are no security concerns where a client
             * could free RM allocated sysmem before unmapping it. Hence, vm_ops
             * are NOP, and at->usage_count is never being used.
             */
            NV_VMA_PRIVATE(vma) = NULL;

            ret = nvidia_mmap_peer_io(vma, at, page_index, pages);

            BUG_ON(NV_VMA_PRIVATE(vma));
        }
        else
        {
            if (nv_encode_caching(&vma->vm_page_prot,
                                  at->cache_type,
                                  NV_MEMORY_TYPE_SYSTEM))
            {
                return -ENXIO;
            }

            NV_VMA_PRIVATE(vma) = at;

            ret = nvidia_mmap_sysmem(vma, at, page_index, pages);
        }

        if (ret)
        {
            return ret;
        }

        NV_PRINT_AT(NV_DBG_MEMINFO, at);

        vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED);
        vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
    }

    // Drop write permissions when the validated context is read-only.
    if ((prot & NV_PROTECT_WRITEABLE) == 0)
    {
        vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot);
        vma->vm_flags &= ~VM_WRITE;
        vma->vm_flags &= ~VM_MAYWRITE;
    }

    vma->vm_ops = &nv_vm_ops;

    return 0;
}
/*
 * file_operations.mmap entry point: validates the fd and dispatches to
 * nvidia_mmap_helper() using the per-fd altstack reserved for mmap.
 */
int nvidia_mmap(
    struct file *file,
    struct vm_area_struct *vma
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file);
    nv_state_t *nv = NV_STATE_PTR(nvl);
    nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file);
    nvidia_stack_t *sp = NULL;
    int status;

    //
    // Do not allow mmap operation if this is a fd into
    // which rm objects have been exported.
    //
    if (nvlfp->nvfp.handles != NULL)
    {
        return -EINVAL;
    }

    // Serialize use of this fd's mmap altstack.
    down(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]);
    sp = nvlfp->fops_sp[NV_FOPS_STACK_INDEX_MMAP];

    status = nvidia_mmap_helper(nv, nvlfp, sp, vma, NULL);

    up(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]);

    return status;
}
/*
 * Unmap every user mapping of this GPU across all of its open files.
 * Caller must hold nvl->mmap_lock. Subsequent faults repopulate PTEs via
 * nvidia_fault().
 */
void
nv_revoke_gpu_mappings_locked(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    nv_linux_file_private_t *nvlfp;

    /* Revoke all mappings for every open file */
    list_for_each_entry (nvlfp, &nvl->open_files, entry)
    {
        // Zap the fd's entire mapped range; last arg 1 = even_cows.
        unmap_mapping_range(&nvlfp->mapping, 0, ~0, 1);
    }

    nvl->all_mappings_revoked = NV_TRUE;
}
/*
 * Revoke all user mappings of this GPU's memory, taking nvl->mmap_lock.
 * Not supported on the control device.
 */
NV_STATUS NV_API_CALL nv_revoke_gpu_mappings(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status = NV_OK;

    // Mapping revocation is only supported for GPU mappings.
    if (NV_IS_CTL_DEVICE(nv))
    {
        status = NV_ERR_NOT_SUPPORTED;
    }
    else
    {
        down(&nvl->mmap_lock);
        nv_revoke_gpu_mappings_locked(nv);
        up(&nvl->mmap_lock);
    }

    return status;
}
/* Acquire this GPU's mmap lock on behalf of the RM (paired with
 * nv_release_mmap_lock()). */
void NV_API_CALL nv_acquire_mmap_lock(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    down(&nvl->mmap_lock);
}
/* Release this GPU's mmap lock (paired with nv_acquire_mmap_lock()). */
void NV_API_CALL nv_release_mmap_lock(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    up(&nvl->mmap_lock);
}
/* Query whether every user mapping of this GPU is currently revoked. */
NvBool NV_API_CALL nv_get_all_mappings_revoked_locked(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    // Caller must hold nvl->mmap_lock for all decisions based on this
    return nvl->all_mappings_revoked;
}
/*
 * Update whether it is currently safe to (re)establish user mappings of
 * this GPU (e.g. across power transitions).
 */
void NV_API_CALL nv_set_safe_to_mmap_locked(
    nv_state_t *nv,
    NvBool safe_to_mmap
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    // Caller must hold nvl->mmap_lock

    /*
     * If nvl->safe_to_mmap is transitioning from TRUE to FALSE, we expect to
     * need to schedule a GPU wakeup callback when we fault.
     *
     * nvl->gpu_wakeup_callback_needed will be set to FALSE in nvidia_fault()
     * after scheduling the GPU wakeup callback, preventing us from scheduling
     * duplicates.
     */
    if (!safe_to_mmap && nvl->safe_to_mmap)
    {
        nvl->gpu_wakeup_callback_needed = NV_TRUE;
    }

    nvl->safe_to_mmap = safe_to_mmap;
}

View File

@@ -0,0 +1,146 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-modeset-interface.h"
#include "os-interface.h"
#include "nv-linux.h"
#include "nvstatus.h"
#include "nv.h"
static const nvidia_modeset_callbacks_t *nv_modeset_callbacks;
/* rm_ops.alloc_stack: allocate an RM altstack for nvidia-modeset. */
static int nvidia_modeset_rm_ops_alloc_stack(nvidia_stack_t **sp)
{
    return nv_kmem_cache_alloc_stack(sp);
}
/* rm_ops.free_stack: release an RM altstack; NULL is a harmless no-op. */
static void nvidia_modeset_rm_ops_free_stack(nvidia_stack_t *sp)
{
    if (sp == NULL)
    {
        return;
    }
    nv_kmem_cache_free_stack(sp);
}
/*
 * Register (cb != NULL) or unregister (cb == NULL) the modeset driver's
 * callbacks. Registering twice, or unregistering when nothing is
 * registered, is an error.
 */
static int nvidia_modeset_set_callbacks(const nvidia_modeset_callbacks_t *cb)
{
    /* Reject iff both pointers are set, or both are clear. */
    if ((cb != NULL) == (nv_modeset_callbacks != NULL))
    {
        return -EINVAL;
    }

    nv_modeset_callbacks = cb;
    return 0;
}
/* Forward a suspend notification to nvidia-modeset, if registered. */
void nvidia_modeset_suspend(NvU32 gpuId)
{
    if (nv_modeset_callbacks != NULL)
    {
        nv_modeset_callbacks->suspend(gpuId);
    }
}
/* Forward a resume notification to nvidia-modeset, if registered. */
void nvidia_modeset_resume(NvU32 gpuId)
{
    if (nv_modeset_callbacks != NULL)
    {
        nv_modeset_callbacks->resume(gpuId);
    }
}
/*
 * rm_ops.enumerate_gpus: fill 'gpu_info' (caller provides an array of
 * NV_MAX_GPUS elements) with the identity of every probed GPU.
 * Returns the number of entries written, or 0 when more than NV_MAX_GPUS
 * devices exist.
 */
static NvU32 nvidia_modeset_enumerate_gpus(nv_gpu_info_t *gpu_info)
{
    nv_linux_state_t *nvl;
    unsigned int count;

    LOCK_NV_LINUX_DEVICES();

    count = 0;

    for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next)
    {
        nv_state_t *nv = NV_STATE_PTR(nvl);

        /*
         * The gpu_info[] array has NV_MAX_GPUS elements. Fail if there
         * are more GPUs than that.
         */
        if (count >= NV_MAX_GPUS) {
            nv_printf(NV_DBG_WARNINGS, "NVRM: More than %d GPUs found.",
                      NV_MAX_GPUS);
            count = 0;
            break;
        }

        gpu_info[count].gpu_id = nv->gpu_id;

        gpu_info[count].pci_info.domain   = nv->pci_info.domain;
        gpu_info[count].pci_info.bus      = nv->pci_info.bus;
        gpu_info[count].pci_info.slot     = nv->pci_info.slot;
        gpu_info[count].pci_info.function = nv->pci_info.function;

        gpu_info[count].os_device_ptr = nvl->dev;

        count++;
    }

    UNLOCK_NV_LINUX_DEVICES();

    return count;
}
/*
 * Hand the nvidia-modeset kernel module its RM operations table.
 * Fails (reporting our version back through rm_ops->version_string) when
 * the two modules were built from different driver versions.
 */
NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops)
{
    const nvidia_modeset_rm_ops_t local_rm_ops = {
        .version_string = NV_VERSION_STRING,
        .system_info    = {
            .allow_write_combining = NV_FALSE,
        },
        .alloc_stack    = nvidia_modeset_rm_ops_alloc_stack,
        .free_stack     = nvidia_modeset_rm_ops_free_stack,
        .enumerate_gpus = nvidia_modeset_enumerate_gpus,
        .open_gpu       = nvidia_dev_get,
        .close_gpu      = nvidia_dev_put,
        .op             = rm_kernel_rmapi_op, /* provided by nv-kernel.o */
        .set_callbacks  = nvidia_modeset_set_callbacks,
    };

    // Version mismatch between nvidia.ko and nvidia-modeset.ko.
    if (strcmp(rm_ops->version_string, NV_VERSION_STRING) != 0)
    {
        rm_ops->version_string = NV_VERSION_STRING;
        return NV_ERR_GENERIC;
    }

    *rm_ops = local_rm_ops;

    if (NV_ALLOW_WRITE_COMBINING(NV_MEMORY_TYPE_FRAMEBUFFER)) {
        rm_ops->system_info.allow_write_combining = NV_TRUE;
    }

    return NV_OK;
}

EXPORT_SYMBOL(nvidia_get_rm_ops);

View File

@@ -0,0 +1,169 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-msi.h"
#include "nv-proto.h"
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
/*
 * Try to switch the device from PCIe virtual-wire interrupts to MSI.
 * On any failure NV_FLAG_USES_MSI is cleared and the device stays on
 * virtual-wire interrupts; no error is returned to the caller.
 */
void NV_API_CALL nv_init_msi(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc = 0;

    rc = pci_enable_msi(nvl->pci_dev);
    if (rc == 0)
    {
        // MSI granted; the device's irq number may have changed.
        nv->interrupt_line = nvl->pci_dev->irq;
        nv->flags |= NV_FLAG_USES_MSI;
        nvl->num_intr = 1;

        // Per-vector IRQ accounting; MSI has exactly one vector.
        NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * nvl->num_intr);

        if (nvl->irq_count == NULL)
        {
            nv->flags &= ~NV_FLAG_USES_MSI;
            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                          "Failed to allocate counter for MSI entry; "
                          "falling back to PCIe virtual-wire interrupts.\n");
        }
        else
        {
            memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * nvl->num_intr);
            nvl->current_num_irq_tracked = 0;
        }
    }
    else
    {
        nv->flags &= ~NV_FLAG_USES_MSI;
        // NOTE(review): irq == 0 appears to mean no legacy IRQ is routed at
        // all, so the fallback message is suppressed then -- confirm.
        if (nvl->pci_dev->irq != 0)
        {
            NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
                          "Failed to enable MSI; "
                          "falling back to PCIe virtual-wire interrupts.\n");
        }
    }

    return;
}
/*
 * Try to switch the device to MSI-X, allocating the msix_entry table and
 * per-vector IRQ accounting. On any failure all partially acquired
 * resources are released via the 'failed' label, NV_FLAG_USES_MSIX is
 * cleared, and the device falls back to its previous interrupt mode; no
 * error is returned to the caller.
 */
void NV_API_CALL nv_init_msix(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int num_intr = 0;
    struct msix_entry *msix_entries;
    int rc = 0;
    int i;

    NV_SPIN_LOCK_INIT(&nvl->msix_isr_lock);

    rc = os_alloc_mutex(&nvl->msix_bh_mutex);
    if (rc != 0)
        goto failed;

    // Clamp the device's vector count to what the driver supports.
    num_intr = nv_get_max_irq(nvl->pci_dev);

    if (num_intr > NV_RM_MAX_MSIX_LINES)
    {
        NV_DEV_PRINTF(NV_DBG_INFO, nv, "Reducing MSI-X count from %d to the "
            "driver-supported maximum %d.\n", num_intr, NV_RM_MAX_MSIX_LINES);
        num_intr = NV_RM_MAX_MSIX_LINES;
    }

    NV_KMALLOC(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
    if (nvl->msix_entries == NULL)
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate MSI-X entries.\n");
        goto failed;
    }

    // Each entry requests its own vector index.
    for (i = 0, msix_entries = nvl->msix_entries; i < num_intr; i++, msix_entries++)
    {
        msix_entries->entry = i;
    }

    // Per-vector IRQ accounting, mirroring the MSI path.
    NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
    if (nvl->irq_count == NULL)
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate counter for MSI-X entries.\n");
        goto failed;
    }
    else
    {
        memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * num_intr);
        nvl->current_num_irq_tracked = 0;
    }

    rc = nv_pci_enable_msix(nvl, num_intr);
    if (rc != NV_OK)
        goto failed;

    nv->flags |= NV_FLAG_USES_MSIX;
    return;

failed:
    // Unwind in reverse order; sizes must match the allocations above.
    nv->flags &= ~NV_FLAG_USES_MSIX;

    if (nvl->msix_entries)
    {
        NV_KFREE(nvl->msix_entries, sizeof(struct msix_entry) * num_intr);
    }

    if (nvl->irq_count)
    {
        NV_KFREE(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr);
    }

    if (nvl->msix_bh_mutex)
    {
        os_free_mutex(nvl->msix_bh_mutex);
        nvl->msix_bh_mutex = NULL;
    }

    NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to enable MSI-X.\n");
}
/*
 * Request a threaded IRQ for every allocated MSI-X vector.
 *
 * On failure, release the vectors that were successfully requested before
 * the failing one. (The previous cleanup loop freed index 'i' -- the very
 * vector whose request just failed -- 'i' times, instead of freeing
 * indices 0..i-1; it now indexes with 'j' so each previously requested
 * vector is freed exactly once.)
 *
 * Returns 0 on success or the request_threaded_irq() error code.
 */
NvS32 NV_API_CALL nv_request_msix_irq(nv_linux_state_t *nvl)
{
    int i;
    int j;
    struct msix_entry *msix_entries;
    int rc = NV_ERR_INVALID_ARGUMENT;
    nv_state_t *nv = NV_STATE_PTR(nvl);

    for (i = 0, msix_entries = nvl->msix_entries; i < nvl->num_intr;
         i++, msix_entries++)
    {
        rc = request_threaded_irq(msix_entries->vector, nvidia_isr_msix,
                                  nvidia_isr_msix_kthread_bh,
                                  nv_default_irq_flags(nv),
                                  nv_device_name, (void *)nvl);
        if (rc)
        {
            // Unwind: free only the vectors requested so far.
            for (j = 0; j < i; j++)
            {
                free_irq(nvl->msix_entries[j].vector, (void *)nvl);
            }
            break;
        }
    }

    return rc;
}
#endif

View File

@@ -0,0 +1,176 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include <linux/kernel.h> // For container_of
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include "os-interface.h"
#include "nv-linux.h"
/* Per-event nanosecond-resolution timer, backed by a Linux hrtimer. */
struct nv_nano_timer
{
    struct hrtimer hr_timer; // This parameter holds linux high resolution timer object
                             // can get replaced with platform specific timer object
    nv_linux_state_t *nv_linux_state;  // owning GPU's per-device state
    void (*nv_nano_timer_callback)(struct nv_nano_timer *nv_nstimer); // invoked on expiry
    void *pTmrEvent;  // opaque RM TMR_EVENT handed back to the RM by the callback
};
/*!
 * @brief Runs the nanosecond-resolution timer callback into RM.
 *
 * Allocates a temporary RM stack, resolves the nv_state_t from the
 * timer's linux state, and forwards the stored TMR_EVENT to
 * rm_run_nano_timer_callback(). Errors are logged and swallowed since
 * this runs from timer (non-process) context with no caller to report to.
 *
 * @param[in] nv_nstimer Pointer to nv_nano_timer_t object
 */
static void
nvidia_nano_timer_callback(
    nv_nano_timer_t *nv_nstimer)
{
    // Fix: dropped the unused 'unsigned long flags' local (never used,
    // triggers -Wunused-variable) and the redundant NULL init of 'nv'.
    nv_state_t *nv;
    nv_linux_state_t *nvl = nv_nstimer->nv_linux_state;
    nvidia_stack_t *sp = NULL;

    if (nv_kmem_cache_alloc_stack(&sp) != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: no cache memory \n");
        return;
    }

    nv = NV_STATE_PTR(nvl);

    if (rm_run_nano_timer_callback(sp, nv, nv_nstimer->pTmrEvent) != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Error in service of callback \n");
    }

    nv_kmem_cache_free_stack(sp);
}
/*!
 * @brief Allocates a zero-initialized nanosecond-resolution timer object.
 *
 * @returns pointer to the new nv_nano_timer_t, or NULL on allocation failure
 */
static nv_nano_timer_t *nv_alloc_nano_timer(void)
{
    nv_nano_timer_t *timer = NULL;

    NV_KMALLOC(timer, sizeof(*timer));
    if (timer != NULL)
    {
        memset(timer, 0, sizeof(*timer));
    }

    return timer;
}
/*
 * hrtimer expiry trampoline: recovers the enclosing nv_nano_timer from
 * the embedded hrtimer and dispatches its stored callback. Always
 * returns HRTIMER_NORESTART, so the timer is strictly one-shot.
 */
static enum hrtimer_restart nv_nano_timer_callback_typed_data(struct hrtimer *hrtmr)
{
    struct nv_nano_timer *timer =
        container_of(hrtmr, struct nv_nano_timer, hr_timer);

    timer->nv_nano_timer_callback(timer);

    return HRTIMER_NORESTART;
}
/*!
 * @brief Creates and initializes a nanosecond-resolution timer object.
 *
 * On success *pnv_nstimer receives the new timer; on allocation failure
 * an error is logged and *pnv_nstimer is set to NULL.
 *
 * @param[in]  nv          Per gpu linux state
 * @param[in]  pTmrEvent   Opaque pointer to the RM TMR_EVENT
 * @param[out] pnv_nstimer Receives the new nv_nano_timer_t object (or NULL)
 */
void NV_API_CALL nv_create_nano_timer(
    nv_state_t *nv,
    void *pTmrEvent,
    nv_nano_timer_t **pnv_nstimer)
{
    nv_nano_timer_t *timer = nv_alloc_nano_timer();

    if (timer == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: Not able to create timer object \n");
        *pnv_nstimer = NULL;
        return;
    }

    timer->nv_linux_state = NV_GET_NVL_FROM_NV_STATE(nv);
    timer->pTmrEvent = pTmrEvent;
    timer->nv_nano_timer_callback = nvidia_nano_timer_callback;

    // Relative, monotonic timer; expiry runs nv_nano_timer_callback_typed_data.
    hrtimer_init(&timer->hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    timer->hr_timer.function = nv_nano_timer_callback_typed_data;

    *pnv_nstimer = timer;
}
/*!
 * @brief Arms the nanosecond-resolution timer as a relative one-shot.
 *
 * @param[in] nv         Per gpu linux state (unused here)
 * @param[in] nv_nstimer Pointer to nv_nano_timer_t object
 * @param[in] time_ns    Relative expiry time in nanoseconds
 */
void NV_API_CALL nv_start_nano_timer(
    nv_state_t *nv,
    nv_nano_timer_t *nv_nstimer,
    NvU64 time_ns)
{
    hrtimer_start(&nv_nstimer->hr_timer, ktime_set(0, time_ns),
                  HRTIMER_MODE_REL);
}
/*!
 * @brief Cancels nano second resolution timer
 *
 * hrtimer_cancel() waits for a concurrently running callback to finish,
 * so on return the timer callback is guaranteed not to be executing.
 *
 * @param[in] nv Per gpu linux state (unused here)
 * @param[in] nv_nstimer Pointer to nv_nano_timer_t object
 */
void NV_API_CALL nv_cancel_nano_timer(
    nv_state_t *nv,
    nv_nano_timer_t *nv_nstimer)
{
    hrtimer_cancel(&nv_nstimer->hr_timer);
}
/*!
 * @brief Cancels & deletes nano second resolution timer object
 *
 * Cancellation (which waits for any in-flight callback) must precede the
 * free so the hrtimer core never touches freed memory.
 *
 * @param[in] nv Per gpu linux state
 * @param[in] nv_nstimer Pointer to nv_nano_timer_t object; invalid after return
 */
void NV_API_CALL nv_destroy_nano_timer(
    nv_state_t *nv,
    nv_nano_timer_t *nv_nstimer)
{
    nv_cancel_nano_timer(nv, nv_nstimer);
    NV_KFREE(nv_nstimer, sizeof(nv_nano_timer_t));
}

View File

@@ -0,0 +1,956 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-ibmnpu.h"
#include "nv-rsync.h"
#include "nv-p2p.h"
#include "rmp2pdefines.h"
/* List node linking one third-party DMA mapping into its owning mem_info. */
typedef struct nv_p2p_dma_mapping {
    struct list_head list_node;               // link on nv_p2p_mem_info.dma_mapping_list
    struct nvidia_p2p_dma_mapping *dma_mapping; // the mapping handed to the third party
} nv_p2p_dma_mapping_t;
/*
 * Driver-private container for one nvidia_p2p_get_pages() result.
 * The public page_table is embedded, so container_of() recovers this
 * struct from the pointer handed to third parties.
 */
typedef struct nv_p2p_mem_info {
    void (*free_callback)(void *data); // third-party callback run on implicit free (non-persistent only)
    void *data;                        // opaque argument for free_callback
    struct nvidia_p2p_page_table page_table; // embedded public page table
    struct {
        struct list_head list_head;    // list of nv_p2p_dma_mapping_t nodes
        struct semaphore lock;         // protects list_head
    } dma_mapping_list;
    NvBool bPersistent;                // NV_TRUE when pages persist until explicit put
    void *private;                     // RM handle for the persistent-pages path
} nv_p2p_mem_info_t;
// Exported capability flag: third parties test this to detect that the
// driver supports persistent pages (free_callback == NULL in get_pages).
int nvidia_p2p_cap_persistent_pages = 1;
EXPORT_SYMBOL(nvidia_p2p_cap_persistent_pages);

// declared and created in nv.c
extern void *nvidia_p2p_page_t_cache;
/* Translation table from RM NV_STATUS codes to negative Linux errnos. */
static struct nvidia_status_mapping {
    NV_STATUS status;
    int error;
} nvidia_status_mappings[] = {
    { NV_ERR_GENERIC,                -EIO },
    { NV_ERR_INSUFFICIENT_RESOURCES, -ENOMEM },
    { NV_ERR_NO_MEMORY,              -ENOMEM },
    { NV_ERR_INVALID_ARGUMENT,       -EINVAL },
    { NV_ERR_INVALID_OBJECT_HANDLE,  -EINVAL },
    { NV_ERR_INVALID_STATE,          -EIO },
    { NV_ERR_NOT_SUPPORTED,          -ENOTSUPP },
    { NV_ERR_OBJECT_NOT_FOUND,       -EINVAL },
    { NV_ERR_STATE_IN_USE,           -EBUSY },
    { NV_ERR_GPU_UUID_NOT_FOUND,     -ENODEV },
    { NV_OK,                          0 },
};

// Number of entries in the table above.
#define NVIDIA_STATUS_MAPPINGS \
    (sizeof(nvidia_status_mappings) / sizeof(struct nvidia_status_mapping))
/*
 * Translate an RM NV_STATUS into the negative errno reported to
 * third-party callers; any code missing from the table maps to -EIO.
 */
static int nvidia_p2p_map_status(NV_STATUS status)
{
    uint8_t idx;

    for (idx = 0; idx < NVIDIA_STATUS_MAPPINGS; idx++)
    {
        if (nvidia_status_mappings[idx].status == status)
        {
            return nvidia_status_mappings[idx].error;
        }
    }

    return -EIO;
}
// Byte sizes for each nvidia_p2p_page_size_type value, indexed by the enum.
static NvU32 nvidia_p2p_page_size_mappings[NVIDIA_P2P_PAGE_SIZE_COUNT] = {
    NVRM_P2P_PAGESIZE_SMALL_4K, NVRM_P2P_PAGESIZE_BIG_64K, NVRM_P2P_PAGESIZE_BIG_128K
};
/*
 * Map a page size in bytes back to its nvidia_p2p_page_size_type index.
 * On success writes the index through page_size_index and returns NV_OK;
 * returns NV_ERR_GENERIC (leaving the output untouched) for unknown sizes.
 */
static NV_STATUS nvidia_p2p_map_page_size(NvU32 page_size, NvU32 *page_size_index)
{
    NvU32 idx;

    for (idx = 0; idx < NVIDIA_P2P_PAGE_SIZE_COUNT; idx++)
    {
        if (nvidia_p2p_page_size_mappings[idx] == page_size)
        {
            *page_size_index = idx;
            return NV_OK;
        }
    }

    return NV_ERR_GENERIC;
}
/*
 * Allocate a tracking node for dma_mapping and append it to mem_info's
 * dma_mapping_list under the list lock. Returns the allocation status.
 */
static NV_STATUS nv_p2p_insert_dma_mapping(
    struct nv_p2p_mem_info *mem_info,
    struct nvidia_p2p_dma_mapping *dma_mapping
)
{
    struct nv_p2p_dma_mapping *entry;
    NV_STATUS status;

    status = os_alloc_mem((void**)&entry, sizeof(*entry));
    if (status != NV_OK)
    {
        return status;
    }

    entry->dma_mapping = dma_mapping;

    down(&mem_info->dma_mapping_list.lock);
    list_add_tail(&entry->list_node, &mem_info->dma_mapping_list.list_head);
    up(&mem_info->dma_mapping_list.lock);

    return NV_OK;
}
/*
 * Atomically unlink and free the tracking node for dma_mapping — or, when
 * dma_mapping is NULL, for the first node on the list. Returns the
 * unlinked nvidia_p2p_dma_mapping, or NULL if nothing matched (i.e. it
 * was already removed by a concurrent tear-down).
 */
static struct nvidia_p2p_dma_mapping* nv_p2p_remove_dma_mapping(
    struct nv_p2p_mem_info *mem_info,
    struct nvidia_p2p_dma_mapping *dma_mapping
)
{
    struct nv_p2p_dma_mapping *entry;
    struct nvidia_p2p_dma_mapping *found = NULL;

    down(&mem_info->dma_mapping_list.lock);
    list_for_each_entry(entry, &mem_info->dma_mapping_list.list_head, list_node)
    {
        if ((dma_mapping == NULL) || (entry->dma_mapping == dma_mapping))
        {
            found = entry->dma_mapping;
            list_del(&entry->list_node);
            os_free_mem(entry);
            break;
        }
    }
    up(&mem_info->dma_mapping_list.lock);

    return found;
}
/*
 * Release one third-party DMA mapping: undo the DMA translations, then
 * free the address array and the mapping object itself. The caller must
 * already have unlinked it via nv_p2p_remove_dma_mapping().
 */
static void nv_p2p_free_dma_mapping(
    struct nvidia_p2p_dma_mapping *dma_mapping
)
{
    nv_dma_device_t peer_dma_dev = {{ 0 }};
    NvU32 page_size;
    NV_STATUS status;
    NvU32 i;

    // Rebuild the peer device descriptor from the mapping's pci_dev.
    peer_dma_dev.dev = &dma_mapping->pci_dev->dev;
    peer_dma_dev.addressable_range.limit = dma_mapping->pci_dev->dma_mask;

    page_size = nvidia_p2p_page_size_mappings[dma_mapping->page_size_type];

    if (dma_mapping->private != NULL)
    {
        // Mapping was created through the alloc-based path (private handle
        // set); it is expected to use the OS page size — warn otherwise.
        WARN_ON(page_size != PAGE_SIZE);

        status = nv_dma_unmap_alloc(&peer_dma_dev,
                                    dma_mapping->entries,
                                    dma_mapping->dma_addresses,
                                    &dma_mapping->private);
        WARN_ON(status != NV_OK);
    }
    else
    {
        // Peer mappings are undone entry by entry, each covering
        // page_size / PAGE_SIZE OS pages.
        for (i = 0; i < dma_mapping->entries; i++)
        {
            nv_dma_unmap_peer(&peer_dma_dev, page_size / PAGE_SIZE,
                              dma_mapping->dma_addresses[i]);
        }
    }

    os_free_mem(dma_mapping->dma_addresses);
    os_free_mem(dma_mapping);
}
/*
 * Tear down a page table and its owning nv_p2p_mem_info: drain every
 * remaining DMA mapping, free the cached page structs, the UUID and
 * page arrays, and finally the container itself.
 */
static void nv_p2p_free_page_table(
    struct nvidia_p2p_page_table *page_table
)
{
    NvU32 i;
    struct nvidia_p2p_dma_mapping *dma_mapping;
    struct nv_p2p_mem_info *mem_info = NULL;

    // page_table is embedded in nv_p2p_mem_info; recover the container.
    mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);

    // NULL matches any entry, so this loop drains the whole mapping list.
    dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL);
    while (dma_mapping != NULL)
    {
        nv_p2p_free_dma_mapping(dma_mapping);
        dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL);
    }

    // Per-page structs come from the dedicated slab cache (see nv.c).
    for (i = 0; i < page_table->entries; i++)
    {
        NV_KMEM_CACHE_FREE(page_table->pages[i], nvidia_p2p_page_t_cache);
    }

    if (page_table->gpu_uuid != NULL)
    {
        os_free_mem(page_table->gpu_uuid);
    }

    if (page_table->pages != NULL)
    {
        os_free_mem(page_table->pages);
    }

    // Frees the embedded page_table too; it must not be touched after this.
    os_free_mem(mem_info);
}
/*
 * Release pages pinned by nvidia_p2p_get_pages(), choosing the persistent
 * or token-based RM path per the owning mem_info, and free the page table
 * when RM confirms it was still linked. On return with NV_OK, *page_table
 * is NULL and must not be used again.
 */
static NV_STATUS nv_p2p_put_pages(
    nvidia_stack_t * sp,
    uint64_t p2p_token,
    uint32_t va_space,
    uint64_t virtual_address,
    struct nvidia_p2p_page_table **page_table
)
{
    NV_STATUS status;
    struct nv_p2p_mem_info *mem_info = NULL;

    // Recover the driver-private container embedding the page table.
    mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table);

    /*
     * rm_p2p_put_pages returns NV_OK if the page_table was found and
     * got unlinked from the RM's tracker (atomically). This ensures that
     * RM's tear-down path does not race with this path.
     *
     * rm_p2p_put_pages returns NV_ERR_OBJECT_NOT_FOUND if the page_table
     * was already unlinked.
     */
    if (mem_info->bPersistent)
    {
        status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table);
    }
    else
    {
        status = rm_p2p_put_pages(sp, p2p_token, va_space,
                                  virtual_address, *page_table);
    }

    if (status == NV_OK)
    {
        nv_p2p_free_page_table(*page_table);
        *page_table = NULL;
    }
    else if (!mem_info->bPersistent && (status == NV_ERR_OBJECT_NOT_FOUND))
    {
        // RM already unlinked and will free it; report success, don't free.
        status = NV_OK;
        *page_table = NULL;
    }
    else
    {
        WARN_ON(status != NV_OK);
    }

    return status;
}
/*
 * RM tear-down hook: frees the platform-private page-table state. The
 * data argument must be the nvidia_p2p_page_table pointer previously
 * handed out; a NULL argument is a warned no-op.
 */
void NV_API_CALL nv_p2p_free_platform_data(
    void *data
)
{
    if (WARN_ON(data == NULL))
    {
        return;
    }

    nv_p2p_free_page_table((struct nvidia_p2p_page_table*)data);
}
/*
 * This API is not supported (see nv-p2p.h). All arguments are ignored
 * and -ENOTSUPP is returned unconditionally.
 */
int nvidia_p2p_init_mapping(
    uint64_t p2p_token,
    struct nvidia_p2p_params *params,
    void (*destroy_callback)(void *data),
    void *data
)
{
    return -ENOTSUPP;
}

EXPORT_SYMBOL(nvidia_p2p_init_mapping);
/*
 * This API is not supported (see nv-p2p.h). The token is ignored and
 * -ENOTSUPP is returned unconditionally.
 */
int nvidia_p2p_destroy_mapping(uint64_t p2p_token)
{
    return -ENOTSUPP;
}

EXPORT_SYMBOL(nvidia_p2p_destroy_mapping);
/*
 * RM-invoked callback for non-persistent mappings: runs the third party's
 * free_callback first, then releases the platform data (the embedded
 * page table and its container).
 */
static void nv_p2p_mem_info_free_callback(void *data)
{
    nv_p2p_mem_info_t *mem_info = (nv_p2p_mem_info_t*) data;

    mem_info->free_callback(mem_info->data);

    nv_p2p_free_platform_data(&mem_info->page_table);
}
/*
 * Pin the GPU pages backing [virtual_address, virtual_address + length)
 * and return them to a third-party driver in a newly allocated page table.
 *
 * A NULL free_callback requests persistent pages: p2p_token and va_space
 * must then both be zero, the GPU is looked up (and referenced) by UUID,
 * and the pages remain pinned until nvidia_p2p_put_pages(). Otherwise the
 * legacy token-based path is used and free_callback/data are registered
 * to run if RM frees the pages implicitly.
 *
 * Returns 0 on success or a negative errno (via nvidia_p2p_map_status).
 */
int nvidia_p2p_get_pages(
    uint64_t p2p_token,
    uint32_t va_space,
    uint64_t virtual_address,
    uint64_t length,
    struct nvidia_p2p_page_table **page_table,
    void (*free_callback)(void * data),
    void *data
)
{
    NV_STATUS status;
    nvidia_stack_t *sp = NULL;
    struct nvidia_p2p_page *page;
    struct nv_p2p_mem_info *mem_info = NULL;
    NvU32 entries;
    NvU32 *wreqmb_h = NULL;
    NvU32 *rreqmb_h = NULL;
    NvU64 *physical_addresses = NULL;
    NvU32 page_count;
    NvU32 i = 0;
    NvBool bGetPages = NV_FALSE;
    NvBool bGetUuid = NV_FALSE;
    NvU32 page_size = NVRM_P2P_PAGESIZE_BIG_64K;
    NvU32 page_size_index;
    NvU64 temp_length;
    NvU8 *gpu_uuid = NULL;
    NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0};
    int rc;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        return rc;
    }

    *page_table = NULL;
    status = os_alloc_mem((void **)&mem_info, sizeof(*mem_info));
    if (status != NV_OK)
    {
        goto failed;
    }
    memset(mem_info, 0, sizeof(*mem_info));
    INIT_LIST_HEAD(&mem_info->dma_mapping_list.list_head);
    NV_INIT_MUTEX(&mem_info->dma_mapping_list.lock);
    *page_table = &(mem_info->page_table);

    // A NULL free_callback selects the persistent-pages path.
    mem_info->bPersistent = (free_callback == NULL);

    // assign length to temporary variable since do_div macro does in-place division
    temp_length = length;
    do_div(temp_length, page_size);
    page_count = temp_length;

    // Round up to cover a trailing partial page.
    if (length & (page_size - 1))
    {
        page_count++;
    }

    status = os_alloc_mem((void **)&physical_addresses,
            (page_count * sizeof(NvU64)));
    if (status != NV_OK)
    {
        goto failed;
    }
    status = os_alloc_mem((void **)&wreqmb_h, (page_count * sizeof(NvU32)));
    if (status != NV_OK)
    {
        goto failed;
    }
    status = os_alloc_mem((void **)&rreqmb_h, (page_count * sizeof(NvU32)));
    if (status != NV_OK)
    {
        goto failed;
    }

    if (mem_info->bPersistent)
    {
        void *gpu_info = NULL;

        if ((p2p_token != 0) || (va_space != 0))
        {
            /*
             * Bug fix: status must hold an NV_STATUS, not a Linux errno.
             * The previous code stored -ENOTSUPP here, which
             * nvidia_p2p_map_status() could not translate, so callers saw
             * -EIO instead of -ENOTSUPP.
             */
            status = NV_ERR_NOT_SUPPORTED;
            goto failed;
        }

        status = rm_p2p_get_gpu_info(sp, virtual_address, length, &gpu_uuid, &gpu_info);
        if (status != NV_OK)
        {
            goto failed;
        }

        // Take a UUID-based reference on the GPU for the mapping's lifetime.
        rc = nvidia_dev_get_uuid(gpu_uuid, sp);
        if (rc != 0)
        {
            status = NV_ERR_GPU_UUID_NOT_FOUND;
            goto failed;
        }

        // Snapshot the UUID so the failure path can drop the reference even
        // after gpu_uuid ownership moves into the page table.
        os_mem_copy(uuid, gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN);
        bGetUuid = NV_TRUE;

        status = rm_p2p_get_pages_persistent(sp, virtual_address, length, &mem_info->private,
                                             physical_addresses, &entries, *page_table, gpu_info);
        if (status != NV_OK)
        {
            goto failed;
        }
    }
    else
    {
        // Get regular old-style, non-persistent mappings
        status = rm_p2p_get_pages(sp, p2p_token, va_space,
                virtual_address, length, physical_addresses, wreqmb_h,
                rreqmb_h, &entries, &gpu_uuid, *page_table);
        if (status != NV_OK)
        {
            goto failed;
        }
    }

    bGetPages = NV_TRUE;
    (*page_table)->gpu_uuid = gpu_uuid;

    status = os_alloc_mem((void *)&(*page_table)->pages,
            (entries * sizeof(page)));
    if (status != NV_OK)
    {
        goto failed;
    }

    (*page_table)->version = NVIDIA_P2P_PAGE_TABLE_VERSION;

    for (i = 0; i < entries; i++)
    {
        page = NV_KMEM_CACHE_ALLOC(nvidia_p2p_page_t_cache);
        if (page == NULL)
        {
            status = NV_ERR_NO_MEMORY;
            goto failed;
        }

        memset(page, 0, sizeof(*page));

        page->physical_address = physical_addresses[i];
        page->registers.fermi.wreqmb_h = wreqmb_h[i];
        page->registers.fermi.rreqmb_h = rreqmb_h[i];

        (*page_table)->pages[i] = page;
        (*page_table)->entries++;
    }

    status = nvidia_p2p_map_page_size(page_size, &page_size_index);
    if (status != NV_OK)
    {
        goto failed;
    }

    (*page_table)->page_size = page_size_index;

    os_free_mem(physical_addresses);
    os_free_mem(wreqmb_h);
    os_free_mem(rreqmb_h);

    if (free_callback != NULL)
    {
        mem_info->free_callback = free_callback;
        mem_info->data          = data;

        status = rm_p2p_register_callback(sp, p2p_token, virtual_address, length,
                                          *page_table, nv_p2p_mem_info_free_callback, mem_info);
        if (status != NV_OK)
        {
            goto failed;
        }
    }

    nv_kmem_cache_free_stack(sp);

    return nvidia_p2p_map_status(status);

failed:
    if (physical_addresses != NULL)
    {
        os_free_mem(physical_addresses);
    }
    if (wreqmb_h != NULL)
    {
        os_free_mem(wreqmb_h);
    }
    if (rreqmb_h != NULL)
    {
        os_free_mem(rreqmb_h);
    }

    // Unpin via the matching put path; on success this NULLs *page_table.
    if (bGetPages)
    {
        (void)nv_p2p_put_pages(sp, p2p_token, va_space,
                               virtual_address, page_table);
    }

    if (bGetUuid)
    {
        nvidia_dev_put_uuid(uuid, sp);
    }

    // Free the container if the put path did not already do so.
    if (*page_table != NULL)
    {
        nv_p2p_free_page_table(*page_table);
    }

    nv_kmem_cache_free_stack(sp);

    return nvidia_p2p_map_status(status);
}

EXPORT_SYMBOL(nvidia_p2p_get_pages);
/*
 * This function is a no-op, but is left in place (for now), in order to allow
 * third-party callers to build and run without errors or warnings. This is OK,
 * because the missing functionality is provided by nv_p2p_free_platform_data,
 * which is being called as part of the RM's cleanup path.
 *
 * Always returns 0; the page_table argument is ignored.
 */
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
{
    return 0;
}

EXPORT_SYMBOL(nvidia_p2p_free_page_table);
/*
 * Release a set of pages previously pinned by nvidia_p2p_get_pages().
 *
 * For persistent mappings (created with a NULL free_callback), the
 * UUID-based GPU reference taken at get time is also dropped.
 *
 * Returns 0 on success or a negative errno.
 */
int nvidia_p2p_put_pages(
    uint64_t p2p_token,
    uint32_t va_space,
    uint64_t virtual_address,
    struct nvidia_p2p_page_table *page_table
)
{
    struct nv_p2p_mem_info *mem_info = NULL;
    NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0};
    NV_STATUS status;
    nvidia_stack_t *sp = NULL;
    NvBool bPersistent;
    int rc = 0;

    /*
     * Bug fix: validate page_table before touching it. The previous code
     * dereferenced page_table->gpu_uuid unconditionally and would oops on
     * a NULL page_table from a third-party caller.
     */
    if (page_table == NULL)
    {
        return -EINVAL;
    }

    // Snapshot the UUID now: nv_p2p_put_pages below may free the page
    // table, but the persistent path still needs the UUID afterwards.
    os_mem_copy(uuid, page_table->gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN);

    mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);

    /*
     * Bug fix: cache bPersistent before the put. nv_p2p_put_pages can free
     * mem_info (via nv_p2p_free_page_table), so reading
     * mem_info->bPersistent afterwards was a use-after-free.
     */
    bPersistent = mem_info->bPersistent;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        return -ENOMEM;
    }

    status = nv_p2p_put_pages(sp, p2p_token, va_space,
                              virtual_address, &page_table);

    if (bPersistent)
    {
        nvidia_dev_put_uuid(uuid, sp);
    }

    nv_kmem_cache_free_stack(sp);

    return nvidia_p2p_map_status(status);
}

EXPORT_SYMBOL(nvidia_p2p_put_pages);
/*
 * Create DMA addresses usable by a third-party PCI device for the pages
 * in page_table, and link the resulting mapping onto the owning
 * mem_info so RM tear-down can find it.
 *
 * Returns 0 on success or a negative errno.
 */
int nvidia_p2p_dma_map_pages(
    struct pci_dev *peer,
    struct nvidia_p2p_page_table *page_table,
    struct nvidia_p2p_dma_mapping **dma_mapping
)
{
    NV_STATUS status;
    nv_dma_device_t peer_dma_dev = {{ 0 }};
    nvidia_stack_t *sp = NULL;
    NvU64 *dma_addresses = NULL;
    NvU32 page_count;
    NvU32 page_size;
    enum nvidia_p2p_page_size_type page_size_type;
    struct nv_p2p_mem_info *mem_info = NULL;
    NvU32 i;
    void *priv;
    int rc;

    if (peer == NULL || page_table == NULL || dma_mapping == NULL ||
        page_table->gpu_uuid == NULL)
    {
        return -EINVAL;
    }

    // Recover the driver-private container embedding the page table.
    mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        return rc;
    }

    *dma_mapping = NULL;
    status = os_alloc_mem((void **)dma_mapping, sizeof(**dma_mapping));
    if (status != NV_OK)
    {
        goto failed;
    }
    memset(*dma_mapping, 0, sizeof(**dma_mapping));

    page_count = page_table->entries;

    status = os_alloc_mem((void **)&dma_addresses,
            (page_count * sizeof(NvU64)));
    if (status != NV_OK)
    {
        goto failed;
    }

    // page_size here stores the *type* (enum index), see nv-p2p.h.
    page_size_type = page_table->page_size;

    // The get_pages path only produces 64K/128K tables; 4KB or out-of-range
    // indicates corruption.
    BUG_ON((page_size_type <= NVIDIA_P2P_PAGE_SIZE_4KB) ||
           (page_size_type >= NVIDIA_P2P_PAGE_SIZE_COUNT));

    peer_dma_dev.dev = &peer->dev;
    peer_dma_dev.addressable_range.limit = peer->dma_mask;

    page_size = nvidia_p2p_page_size_mappings[page_size_type];

    // Seed the array with physical addresses; RM rewrites them in place
    // with peer-visible DMA addresses.
    for (i = 0; i < page_count; i++)
    {
        dma_addresses[i] = page_table->pages[i]->physical_address;
    }

    status = rm_p2p_dma_map_pages(sp, &peer_dma_dev,
            page_table->gpu_uuid, page_size, page_count, dma_addresses, &priv);
    if (status != NV_OK)
    {
        goto failed;
    }

    (*dma_mapping)->version        = NVIDIA_P2P_DMA_MAPPING_VERSION;
    (*dma_mapping)->page_size_type = page_size_type;
    (*dma_mapping)->entries        = page_count;
    (*dma_mapping)->dma_addresses  = dma_addresses;
    (*dma_mapping)->private        = priv;
    (*dma_mapping)->pci_dev        = peer;

    /*
     * All success, it is safe to insert dma_mapping now.
     */
    status = nv_p2p_insert_dma_mapping(mem_info, *dma_mapping);
    if (status != NV_OK)
    {
        goto failed_insert;
    }

    nv_kmem_cache_free_stack(sp);

    return 0;

failed_insert:
    // nv_p2p_free_dma_mapping already frees dma_addresses and the mapping;
    // NULL both so the shared 'failed' path below does not double-free.
    nv_p2p_free_dma_mapping(*dma_mapping);
    dma_addresses = NULL;
    *dma_mapping = NULL;

failed:
    if (dma_addresses != NULL)
    {
        os_free_mem(dma_addresses);
    }
    if (*dma_mapping != NULL)
    {
        os_free_mem(*dma_mapping);
        *dma_mapping = NULL;
    }

    nv_kmem_cache_free_stack(sp);

    return nvidia_p2p_map_status(status);
}

EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);
/*
 * Unmap a DMA mapping previously created by nvidia_p2p_dma_map_pages().
 * If the mapping was already unlinked by RM's tear-down path this is a
 * successful no-op; otherwise the mapping and its resources are released.
 *
 * Returns 0 on success or -EINVAL for NULL arguments.
 */
int nvidia_p2p_dma_unmap_pages(
    struct pci_dev *peer,
    struct nvidia_p2p_page_table *page_table,
    struct nvidia_p2p_dma_mapping *dma_mapping
)
{
    struct nv_p2p_mem_info *mem_info;

    if ((peer == NULL) || (dma_mapping == NULL) || (page_table == NULL))
    {
        return -EINVAL;
    }

    mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table);

    /*
     * nv_p2p_remove_dma_mapping returns dma_mapping if the dma_mapping was
     * found and got unlinked from the mem_info->dma_mapping_list (atomically).
     * This ensures that the RM's tear-down path does not race with this path.
     *
     * nv_p2p_remove_dma_mappings returns NULL if the dma_mapping was already
     * unlinked.
     */
    if (nv_p2p_remove_dma_mapping(mem_info, dma_mapping) == NULL)
    {
        return 0;
    }

    WARN_ON(peer != dma_mapping->pci_dev);

    BUG_ON((dma_mapping->page_size_type <= NVIDIA_P2P_PAGE_SIZE_4KB) ||
           (dma_mapping->page_size_type >= NVIDIA_P2P_PAGE_SIZE_COUNT));

    nv_p2p_free_dma_mapping(dma_mapping);

    return 0;
}

EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);
/*
 * This function is a no-op, but is left in place (for now), in order to allow
 * third-party callers to build and run without errors or warnings. This is OK,
 * because the missing functionality is provided by nv_p2p_free_platform_data,
 * which is being called as part of the RM's cleanup path.
 *
 * Always returns 0; the dma_mapping argument is ignored.
 */
int nvidia_p2p_free_dma_mapping(
    struct nvidia_p2p_dma_mapping *dma_mapping
)
{
    return 0;
}

EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
/*
 * Register an external rsync driver after validating that the structure
 * is non-NULL, version-compatible, and supplies all three callbacks.
 * Short-circuit evaluation guarantees driver is checked for NULL before
 * any field is read.
 *
 * Returns 0 on success, -EINVAL on validation failure, or the result of
 * nv_register_rsync_driver().
 */
int nvidia_p2p_register_rsync_driver(
    nvidia_p2p_rsync_driver_t *driver,
    void *data
)
{
    if ((driver == NULL) ||
        !NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver) ||
        (driver->get_relaxed_ordering_mode == NULL) ||
        (driver->put_relaxed_ordering_mode == NULL) ||
        (driver->wait_for_rsync == NULL))
    {
        return -EINVAL;
    }

    return nv_register_rsync_driver(driver->get_relaxed_ordering_mode,
                                    driver->put_relaxed_ordering_mode,
                                    driver->wait_for_rsync, data);
}

EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver);
/*
 * Unregister a previously registered rsync driver. Invalid arguments
 * (NULL driver, version mismatch, or missing callbacks) trigger a kernel
 * warning and are otherwise ignored; short-circuit evaluation ensures
 * driver is NULL-checked before its fields are read.
 */
void nvidia_p2p_unregister_rsync_driver(
    nvidia_p2p_rsync_driver_t *driver,
    void *data
)
{
    if ((driver == NULL) ||
        !NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver) ||
        (driver->get_relaxed_ordering_mode == NULL) ||
        (driver->put_relaxed_ordering_mode == NULL) ||
        (driver->wait_for_rsync == NULL))
    {
        WARN_ON(1);
        return;
    }

    nv_unregister_rsync_driver(driver->get_relaxed_ordering_mode,
                               driver->put_relaxed_ordering_mode,
                               driver->wait_for_rsync, data);
}

EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver);
/*
 * Enumerate the IBM NPU generation registers for every GPU that has them,
 * mapping each register window and returning the set in a newly allocated
 * nvidia_p2p_rsync_reg_info_t. Caller releases it with
 * nvidia_p2p_put_rsync_registers().
 *
 * Returns 0 on success, -EINVAL/-ENOMEM on argument/allocation failure,
 * or -ENODEV when no GPU exposes rsync registers.
 */
int nvidia_p2p_get_rsync_registers(
    nvidia_p2p_rsync_reg_info_t **reg_info
)
{
    nv_linux_state_t *nvl;
    nv_state_t *nv;
    NV_STATUS status;
    void *ptr = NULL;
    NvU64 addr;
    NvU64 size;
    struct pci_dev *ibmnpu = NULL;
    NvU32 index = 0;
    NvU32 count = 0;
    nvidia_p2p_rsync_reg_info_t *info = NULL;
    nvidia_p2p_rsync_reg_t *regs = NULL;

    if (reg_info == NULL)
    {
        return -EINVAL;
    }

    status = os_alloc_mem((void**)&info, sizeof(*info));
    if (status != NV_OK)
    {
        return -ENOMEM;
    }
    memset(info, 0, sizeof(*info));

    info->version = NVIDIA_P2P_RSYNC_REG_INFO_VERSION;

    // Hold the device-list lock across both passes so the list cannot
    // change between counting and filling.
    LOCK_NV_LINUX_DEVICES();

    // First pass: size the regs array by counting all devices.
    for (nvl = nv_linux_devices; nvl; nvl = nvl->next)
    {
        count++;
    }

    status = os_alloc_mem((void**)&regs, (count * sizeof(*regs)));
    if (status != NV_OK)
    {
        // NOTE(review): put_rsync_registers is invoked while still holding
        // the device-list lock here — looks safe (it only frees/unmaps),
        // but confirm the intended unlock ordering.
        nvidia_p2p_put_rsync_registers(info);
        UNLOCK_NV_LINUX_DEVICES();
        return -ENOMEM;
    }

    // Second pass: map the generation-register window of each GPU that
    // reports one; GPUs without NPU info are silently skipped.
    for (nvl = nv_linux_devices; nvl; nvl = nvl->next)
    {
        nv = NV_STATE_PTR(nvl);

        addr = 0;
        size = 0;

        status = nv_get_ibmnpu_genreg_info(nv, &addr, &size, (void**)&ibmnpu);
        if (status != NV_OK)
        {
            continue;
        }

        ptr = nv_ioremap_nocache(addr, size);
        if (ptr == NULL)
        {
            continue;
        }

        regs[index].ptr = ptr;
        regs[index].size = size;
        regs[index].gpu = nvl->pci_dev;
        regs[index].ibmnpu = ibmnpu;
        regs[index].cluster_id = 0;
        regs[index].socket_id = nv_get_ibmnpu_chip_id(nv);
        index++;
    }

    UNLOCK_NV_LINUX_DEVICES();

    info->regs = regs;
    info->entries = index;

    if (info->entries == 0)
    {
        // Frees both regs and info.
        nvidia_p2p_put_rsync_registers(info);
        return -ENODEV;
    }

    *reg_info = info;

    return 0;
}

EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers);
/*
 * Release a register info structure returned by
 * nvidia_p2p_get_rsync_registers(): unmap every mapped register window,
 * then free the register array and the info structure. A NULL reg_info
 * is a no-op.
 */
void nvidia_p2p_put_rsync_registers(
    nvidia_p2p_rsync_reg_info_t *reg_info
)
{
    NvU32 i;

    if (reg_info == NULL)
    {
        return;
    }

    if (reg_info->regs != NULL)
    {
        for (i = 0; i < reg_info->entries; i++)
        {
            nvidia_p2p_rsync_reg_t *reg = &reg_info->regs[i];

            if (reg->ptr != NULL)
            {
                nv_iounmap(reg->ptr, reg->size);
            }
        }

        os_free_mem(reg_info->regs);
    }

    os_free_mem(reg_info);
}

EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers);

View File

@@ -0,0 +1,427 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_P2P_H_
#define _NV_P2P_H_
/*
* NVIDIA P2P Structure Versioning
*
* For the nvidia_p2p_*_t structures allocated by the NVIDIA driver, it will
* set the version field of the structure according to the definition used by
* the NVIDIA driver. The "major" field of the version is defined as the upper
* 16 bits, and the "minor" field of the version is defined as the lower 16
* bits. The version field will always be the first 4 bytes of the structure,
* and third-party drivers should check the value of this field in structures
* allocated by the NVIDIA driver to ensure runtime compatibility.
*
* In general, version numbers will be incremented as follows:
* - When a backwards-compatible change is made to the structure layout, the
* minor version for that structure will be incremented. Third-party drivers
* built against an older minor version will continue to work with the newer
* minor version used by the NVIDIA driver, without recompilation.
* - When a breaking change is made to the structure layout, the major version
* will be incremented. Third-party drivers built against an older major
* version require at least recompilation and potentially additional updates
* to use the new API.
*/
/* Version packing: major in the upper 16 bits, minor in the lower 16. */
#define NVIDIA_P2P_MAJOR_VERSION_MASK   0xffff0000
#define NVIDIA_P2P_MINOR_VERSION_MASK   0x0000ffff

#define NVIDIA_P2P_MAJOR_VERSION(v) \
    (((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16)

#define NVIDIA_P2P_MINOR_VERSION(v) \
    (((v) & NVIDIA_P2P_MINOR_VERSION_MASK))

/* True when the structure p has the same major version as v. */
#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \
    (NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v))

/* True when p's version is backwards-compatible with v (same major,
 * minor at least v's minor). */
#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \
    (NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \
    (NVIDIA_P2P_MINOR_VERSION((p)->version) >= (NVIDIA_P2P_MINOR_VERSION(v))))
/* GPU architecture identifiers used in nvidia_p2p_params.architecture. */
enum {
    NVIDIA_P2P_ARCHITECTURE_TESLA = 0,
    NVIDIA_P2P_ARCHITECTURE_FERMI,
    NVIDIA_P2P_ARCHITECTURE_CURRENT = NVIDIA_P2P_ARCHITECTURE_FERMI
};
/* Version of the nvidia_p2p_params structure (major 1, minor 1). */
#define NVIDIA_P2P_PARAMS_VERSION   0x00010001

/* Indices into nvidia_p2p_params.addresses[]. */
enum {
    NVIDIA_P2P_PARAMS_ADDRESS_INDEX_GPU = 0,
    NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE,
    NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX = \
        NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE
};

/* Length in bytes of a GPU UUID. */
#define NVIDIA_P2P_GPU_UUID_LEN   16

/*
 * Mailbox-address parameters for the legacy nvidia_p2p_init_mapping()
 * API (which is not supported; see below).
 */
typedef
struct nvidia_p2p_params {
    uint32_t version;       /* structure version (NVIDIA_P2P_PARAMS_VERSION) */
    uint32_t architecture;  /* one of NVIDIA_P2P_ARCHITECTURE_* */
    union nvidia_p2p_mailbox_addresses {
        struct {
            uint64_t wmb_addr;
            uint64_t wmb_data;
            uint64_t rreq_addr;
            uint64_t rcomp_addr;
            uint64_t reserved[2];
        } fermi;
    } addresses[NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX+1];
} nvidia_p2p_params_t;
/*
* Capability flag for users to detect
* driver support for persistent pages.
*/
extern int nvidia_p2p_cap_persistent_pages;
#define NVIDIA_P2P_CAP_PERSISTENT_PAGES
/*
* This API is not supported.
*/
int nvidia_p2p_init_mapping(uint64_t p2p_token,
struct nvidia_p2p_params *params,
void (*destroy_callback)(void *data),
void *data);
/*
* This API is not supported.
*/
int nvidia_p2p_destroy_mapping(uint64_t p2p_token);
/* Page-size selectors used by page tables and DMA mappings. */
enum nvidia_p2p_page_size_type {
    NVIDIA_P2P_PAGE_SIZE_4KB = 0,
    NVIDIA_P2P_PAGE_SIZE_64KB,
    NVIDIA_P2P_PAGE_SIZE_128KB,
    NVIDIA_P2P_PAGE_SIZE_COUNT   /* number of selectors; not a valid size */
};
/* One pinned GPU page: its physical address plus per-architecture
 * request-mailbox registers. */
typedef
struct nvidia_p2p_page {
    uint64_t physical_address;
    union nvidia_p2p_request_registers {
        struct {
            uint32_t wreqmb_h;     /* write request mailbox handle */
            uint32_t rreqmb_h;     /* read request mailbox handle */
            uint32_t rreqmb_0;
            uint32_t reserved[3];
        } fermi;
    } registers;
} nvidia_p2p_page_t;
/* Version of the nvidia_p2p_page_table structure (major 1, minor 2). */
#define NVIDIA_P2P_PAGE_TABLE_VERSION   0x00010002

/* True when the page table p is compatible with this header's version. */
#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \
    NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION)

/* Result of nvidia_p2p_get_pages(): the set of pinned GPU pages. */
typedef
struct nvidia_p2p_page_table {
    uint32_t version;               /* NVIDIA_P2P_PAGE_TABLE_VERSION */
    uint32_t page_size;             /* enum nvidia_p2p_page_size_type */
    struct nvidia_p2p_page **pages; /* array of 'entries' page descriptors */
    uint32_t entries;               /* number of valid elements in pages[] */
    uint8_t *gpu_uuid;              /* UUID of the GPU owning the pages */
} nvidia_p2p_page_table_t;
/*
* @brief
* Make the pages underlying a range of GPU virtual memory
* accessible to a third-party device.
*
* This API only supports pinned, GPU-resident memory, such as that provided
* by cudaMalloc().
*
* This API may sleep.
*
* @param[in] p2p_token
* A token that uniquely identifies the P2P mapping.
* @param[in] va_space
* A GPU virtual address space qualifier.
* @param[in] virtual_address
* The start address in the specified virtual address space.
* Address must be aligned to the 64KB boundary.
* @param[in] length
* The length of the requested P2P mapping.
* Length must be a multiple of 64KB.
* @param[out] page_table
* A pointer to an array of structures with P2P PTEs.
* @param[in] free_callback
* A pointer to the function to be invoked when the pages
* underlying the virtual address range are freed
* implicitly.
* If NULL, persistent pages will be returned.
* This means the pages underlying the range of GPU virtual memory
* will persist until explicitly freed by nvidia_p2p_put_pages().
* Persistent GPU memory mappings are not supported on PowerPC,
* MIG-enabled devices, APM-enabled devices and vGPU.
* @param[in] data
* A non-NULL opaque pointer to private data to be passed to the
* callback function.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -ENOMEM if the driver failed to allocate memory or if
* insufficient resources were available to complete the operation.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address,
uint64_t length,
struct nvidia_p2p_page_table **page_table,
void (*free_callback)(void *data),
void *data);
/* Version of the nvidia_p2p_dma_mapping structure (major 2, minor 3). */
#define NVIDIA_P2P_DMA_MAPPING_VERSION   0x00020003

/* True when the DMA mapping p is compatible with this header's version. */
#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \
    NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION)

struct pci_dev;

/* Result of nvidia_p2p_dma_map_pages(): peer-visible DMA addresses. */
typedef
struct nvidia_p2p_dma_mapping {
    uint32_t version;                             /* NVIDIA_P2P_DMA_MAPPING_VERSION */
    enum nvidia_p2p_page_size_type page_size_type; /* granularity of each entry */
    uint32_t entries;                             /* number of dma_addresses */
    uint64_t *dma_addresses;                      /* addresses for the peer device */
    void *private;                                /* driver-internal handle; do not touch */
    struct pci_dev *pci_dev;                      /* peer device this mapping is for */
} nvidia_p2p_dma_mapping_t;
/*
* @brief
* Make the physical pages retrieved using nvidia_p2p_get_pages accessible to
* a third-party device.
*
* @param[in] peer
* The struct pci_dev * of the peer device that needs to DMA to/from the
* mapping.
* @param[in] page_table
* The page table outlining the physical pages underlying the mapping, as
* retrieved with nvidia_p2p_get_pages().
* @param[out] dma_mapping
* The DMA mapping containing the DMA addresses to use on the third-party
* device.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -ENOTSUPP if the requested operation is not supported.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_dma_map_pages(struct pci_dev *peer,
struct nvidia_p2p_page_table *page_table,
struct nvidia_p2p_dma_mapping **dma_mapping);
/*
* @brief
* Unmap the physical pages previously mapped to the third-party device by
* nvidia_p2p_dma_map_pages().
*
* @param[in] peer
* The struct pci_dev * of the peer device that the DMA mapping belongs to.
* @param[in] page_table
* The page table backing the DMA mapping to be unmapped.
* @param[in] dma_mapping
* The DMA mapping containing the DMA addresses used by the third-party
* device, as retrieved with nvidia_p2p_dma_map_pages(). After this call
* returns, neither this struct nor the addresses contained within will be
* valid for use by the third-party device.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer,
struct nvidia_p2p_page_table *page_table,
struct nvidia_p2p_dma_mapping *dma_mapping);
/*
* @brief
* Release a set of pages previously made accessible to
* a third-party device.
*
* @param[in] p2p_token
* A token that uniquely identifies the P2P mapping.
* @param[in] va_space
* A GPU virtual address space qualifier.
* @param[in] virtual_address
* The start address in the specified virtual address space.
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
* -EIO if an unknown error occurred.
*/
int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space,
uint64_t virtual_address,
struct nvidia_p2p_page_table *page_table);
/*
* @brief
* Free a third-party P2P page table. (This function is a no-op.)
*
* @param[in] page_table
* A pointer to the array of structures with P2P PTEs.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
*/
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table);
/*
* @brief
* Free a third-party P2P DMA mapping. (This function is a no-op.)
*
* @param[in] dma_mapping
* A pointer to the DMA mapping structure.
*
* @return
* 0 upon successful completion.
* -EINVAL if an invalid argument was supplied.
*/
int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping);
#define NVIDIA_P2P_RSYNC_DRIVER_VERSION 0x00010001
#define NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_DRIVER_VERSION)
typedef
struct nvidia_p2p_rsync_driver {
uint32_t version;
int (*get_relaxed_ordering_mode)(int *mode, void *data);
void (*put_relaxed_ordering_mode)(int mode, void *data);
void (*wait_for_rsync)(struct pci_dev *gpu, void *data);
} nvidia_p2p_rsync_driver_t;
/*
* @brief
* Registers the rsync driver.
*
* @param[in] driver
* A pointer to the rsync driver structure. The NVIDIA driver would use,
*
* get_relaxed_ordering_mode to obtain a reference to the current relaxed
* ordering mode (treated as a boolean) from the rsync driver.
*
* put_relaxed_ordering_mode to release a reference to the current relaxed
* ordering mode back to the rsync driver. The NVIDIA driver will call this
* function once for each successful call to get_relaxed_ordering_mode, and
* the relaxed ordering mode must not change until the last reference is
* released.
*
* wait_for_rsync to call into the rsync module to issue RSYNC. This callback
* can't sleep or re-schedule as it may arrive under spinlocks.
* @param[in] data
* A pointer to the rsync driver's private data.
*
* @Returns
* 0 upon successful completion.
* -EINVAL parameters are incorrect.
* -EBUSY if a module is already registered or GPU devices are in use.
*/
int nvidia_p2p_register_rsync_driver(nvidia_p2p_rsync_driver_t *driver,
void *data);
/*
* @brief
* Unregisters the rsync driver.
*
* @param[in] driver
* A pointer to the rsync driver structure.
* @param[in] data
* A pointer to the rsync driver's private data.
*/
void nvidia_p2p_unregister_rsync_driver(nvidia_p2p_rsync_driver_t *driver,
void *data);
#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION 0x00020001
#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION_COMPATIBLE(p) \
NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_REG_INFO_VERSION)
typedef struct nvidia_p2p_rsync_reg {
void *ptr;
size_t size;
struct pci_dev *ibmnpu;
struct pci_dev *gpu;
uint32_t cluster_id;
uint32_t socket_id;
} nvidia_p2p_rsync_reg_t;
typedef struct nvidia_p2p_rsync_reg_info {
uint32_t version;
nvidia_p2p_rsync_reg_t *regs;
size_t entries;
} nvidia_p2p_rsync_reg_info_t;
/*
* @brief
* Gets rsync (GEN-ID) register information associated with the supported
* NPUs.
*
* The caller would use the returned information {GPU device, NPU device,
* socket-id, cluster-id} to pick the optimal generation registers to issue
* RSYNC (NVLink HW flush).
*
* The interface allocates structures to return the information, hence
* nvidia_p2p_put_rsync_registers() must be called to free the structures.
*
* Note, cluster-id is hardcoded to zero as early system configurations would
* only support cluster mode i.e. all devices would share the same cluster-id
* (0). In the future, appropriate kernel support would be needed to query
* cluster-ids.
*
* @param[out] reg_info
* A pointer to the rsync reg info structure.
*
* @Returns
* 0 Upon successful completion. Otherwise, returns negative value.
*/
int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info);
/*
* @brief
* Frees the structures allocated by nvidia_p2p_get_rsync_registers().
*
* @param[in] reg_info
* A pointer to the rsync reg info structure.
*/
void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info);
#endif /* _NV_P2P_H_ */

View File

@@ -0,0 +1,478 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-pat.h"
int nv_pat_mode = NV_PAT_MODE_DISABLED;
#if defined(NV_ENABLE_PAT_SUPPORT)
/*
* Private PAT support for use by the NVIDIA driver. This is used on
* kernels that do not modify the PAT to include a write-combining
* entry.
*
* On kernels that have CONFIG_X86_PAT, the NVIDIA driver still checks that the
* WC entry is as expected before using PAT.
*/
#if defined(CONFIG_X86_PAT)
#define NV_ENABLE_BUILTIN_PAT_SUPPORT 0
#else
#define NV_ENABLE_BUILTIN_PAT_SUPPORT 1
#endif
#define NV_READ_PAT_ENTRIES(pat1, pat2) rdmsr(0x277, (pat1), (pat2))
#define NV_WRITE_PAT_ENTRIES(pat1, pat2) wrmsr(0x277, (pat1), (pat2))
#define NV_PAT_ENTRY(pat, index) \
(((pat) & (0xff << ((index)*8))) >> ((index)*8))
#if NV_ENABLE_BUILTIN_PAT_SUPPORT
static unsigned long orig_pat1, orig_pat2;
/*
 * Disable CPU caching in preparation for rewriting the PAT MSR.
 *
 * Sets CR0.CD (bit 30) and clears CR0.NW (bit 29), flushes the caches
 * with wbinvd(), saves the current CR4 in *cr4, and clears CR4.PGE
 * (bit 7) if it was set so the subsequent TLB flush also drops global
 * entries. Callers invoke this with interrupts disabled (NV_CLI).
 */
static inline void nv_disable_caches(unsigned long *cr4)
{
    unsigned long cr0 = read_cr0();
    /* CR0 = (CR0 & ~NW) | CD: cache-disable without write-through. */
    write_cr0(((cr0 & (0xdfffffff)) | 0x40000000));
    wbinvd();
    *cr4 = NV_READ_CR4();
    /* Drop CR4.PGE so the flush below also invalidates global TLB entries. */
    if (*cr4 & 0x80) NV_WRITE_CR4(*cr4 & ~0x80);
    __flush_tlb();
}
/*
 * Re-enable CPU caching after the PAT MSR has been updated.
 *
 * Flushes caches and the TLB, clears CR0.CD and CR0.NW, and restores
 * the CR4 value saved by nv_disable_caches() (re-enabling CR4.PGE if
 * it had been set).
 */
static inline void nv_enable_caches(unsigned long cr4)
{
    unsigned long cr0 = read_cr0();
    wbinvd();
    __flush_tlb();
    /* CR0 &= ~(CD | NW): back to normal cached operation. */
    write_cr0((cr0 & 0x9fffffff));
    /* Only rewrite CR4 if PGE was originally set (mirrors the disable path). */
    if (cr4 & 0x80) NV_WRITE_CR4(cr4);
}
static void nv_setup_pat_entries(void *info)
{
unsigned long pat1, pat2, cr4;
unsigned long eflags;
#if defined(NV_ENABLE_HOTPLUG_CPU)
int cpu = (NvUPtr)info;
if ((cpu != 0) && (cpu != (int)smp_processor_id()))
return;
#endif
NV_SAVE_FLAGS(eflags);
NV_CLI();
nv_disable_caches(&cr4);
NV_READ_PAT_ENTRIES(pat1, pat2);
pat1 &= 0xffff00ff;
pat1 |= 0x00000100;
NV_WRITE_PAT_ENTRIES(pat1, pat2);
nv_enable_caches(cr4);
NV_RESTORE_FLAGS(eflags);
}
/*
 * Per-CPU worker: write back the PAT MSR values saved at enable time.
 *
 * Restores orig_pat1/orig_pat2 (captured by
 * nv_enable_builtin_pat_support()) with interrupts disabled and caches
 * off. "info" follows the same convention as nv_setup_pat_entries():
 * NULL/0 means run unconditionally, otherwise it names the target CPU.
 */
static void nv_restore_pat_entries(void *info)
{
    unsigned long cr4;
    unsigned long eflags;
#if defined(NV_ENABLE_HOTPLUG_CPU)
    int cpu = (NvUPtr)info;
    /* A target of 0 means "every CPU"; otherwise only the named CPU acts. */
    if ((cpu != 0) && (cpu != (int)smp_processor_id()))
        return;
#endif
    NV_SAVE_FLAGS(eflags);
    NV_CLI();
    nv_disable_caches(&cr4);
    NV_WRITE_PAT_ENTRIES(orig_pat1, orig_pat2);
    nv_enable_caches(cr4);
    NV_RESTORE_FLAGS(eflags);
}
/*
* NOTE 1:
* Functions register_cpu_notifier(), unregister_cpu_notifier(),
 * macros register_hotcpu_notifier and unregister_hotcpu_notifier,
* and CPU states CPU_DOWN_FAILED, CPU_DOWN_PREPARE
* were removed by the following commit:
* 2016 Dec 25: b272f732f888d4cf43c943a40c9aaa836f9b7431
*
* NV_REGISTER_CPU_NOTIFIER_PRESENT is true when
* register_cpu_notifier() is present.
*
* The functions cpuhp_setup_state() and cpuhp_remove_state() should be
* used as an alternative to register_cpu_notifier() and
* unregister_cpu_notifier() functions. The following
* commit introduced these functions as well as the enum cpuhp_state.
* 2016 Feb 26: 5b7aa87e0482be768486e0c2277aa4122487eb9d
*
* NV_CPUHP_CPUHP_STATE_PRESENT is true when cpuhp_setup_state() is present.
*
* For kernels where both cpuhp_setup_state() and register_cpu_notifier()
* are present, we still use register_cpu_notifier().
*/
/*
 * CPU-hotplug teardown callback: restore the original PAT on the CPU
 * that is about to go offline.
 *
 * If we are already executing on the target CPU, restore directly;
 * otherwise broadcast to the other CPUs, passing the target CPU id as
 * the pointer-sized callback argument so nv_restore_pat_entries() can
 * filter on it.
 *
 * Always returns 0, as required by the cpuhp/notifier callers.
 */
static int
nvidia_cpu_teardown(unsigned int cpu)
{
#if defined(NV_ENABLE_HOTPLUG_CPU)
    unsigned int this_cpu = get_cpu();
    if (this_cpu == cpu)
        nv_restore_pat_entries(NULL);
    else
    {
        /*
         * Pass the CPU id by value, not by address: the callback
         * recovers it with "(NvUPtr)info", so passing "&cpu" would make
         * it compare a stack address against CPU ids and return without
         * restoring the PAT.
         */
        smp_call_function(nv_restore_pat_entries, (void *)(NvUPtr)cpu, 1);
    }
    put_cpu();
#endif
    return 0;
}
/*
 * CPU-hotplug online callback: program the driver's write-combining
 * PAT entry on a freshly-onlined CPU.
 *
 * If we are already executing on the target CPU, program the PAT
 * directly; otherwise broadcast to the other CPUs, passing the target
 * CPU id as the pointer-sized callback argument so
 * nv_setup_pat_entries() can filter on it.
 *
 * Always returns 0, as required by the cpuhp/notifier callers.
 */
static int
nvidia_cpu_online(unsigned int cpu)
{
#if defined(NV_ENABLE_HOTPLUG_CPU)
    unsigned int this_cpu = get_cpu();
    if (this_cpu == cpu)
        nv_setup_pat_entries(NULL);
    else
    {
        /*
         * Pass the CPU id by value, not by address: the callback
         * recovers it with "(NvUPtr)info", so passing "&cpu" would make
         * it compare a stack address against CPU ids and return without
         * programming the PAT.
         */
        smp_call_function(nv_setup_pat_entries, (void *)(NvUPtr)cpu, 1);
    }
    put_cpu();
#endif
    return 0;
}
/*
 * Save the boot-time PAT values and program the driver's WC entry on
 * every online CPU.
 *
 * The original MSR contents are stashed in orig_pat1/orig_pat2 so
 * nv_disable_builtin_pat_support() can restore them later. Always
 * returns 1 ("PAT usable"), matching nv_enable_pat_support()'s return
 * convention.
 */
static int nv_enable_builtin_pat_support(void)
{
    unsigned long pat1, pat2;
    NV_READ_PAT_ENTRIES(orig_pat1, orig_pat2);
    nv_printf(NV_DBG_SETUP, "saved orig pats as 0x%lx 0x%lx\n", orig_pat1, orig_pat2);
    /* Install the WC entry on all online CPUs (NULL = unconditional). */
    on_each_cpu(nv_setup_pat_entries, NULL, 1);
    NV_READ_PAT_ENTRIES(pat1, pat2);
    nv_printf(NV_DBG_SETUP, "changed pats to 0x%lx 0x%lx\n", pat1, pat2);
    return 1;
}
/*
 * Restore the saved boot-time PAT values on every online CPU and mark
 * PAT support as disabled.
 */
static void nv_disable_builtin_pat_support(void)
{
    unsigned long pat1, pat2;
    /* Write orig_pat1/orig_pat2 back on all online CPUs. */
    on_each_cpu(nv_restore_pat_entries, NULL, 1);
    nv_pat_mode = NV_PAT_MODE_DISABLED;
    NV_READ_PAT_ENTRIES(pat1, pat2);
    nv_printf(NV_DBG_SETUP, "restored orig pats as 0x%lx 0x%lx\n", pat1, pat2);
}
/*
 * Legacy CPU notifier callback, used on kernels that still provide
 * register_cpu_notifier() (see NOTE 1).
 *
 * Programs the driver's PAT entry when a CPU comes online (or a
 * planned offline fails), and restores the original PAT before a CPU
 * goes down. "hcpu" carries the CPU id cast to a pointer.
 */
static int
nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    /* CPU_DOWN_FAILED was added by the following commit
     * 2004 Oct 18: 71da3667be80d30121df3972caa0bf5684228379
     *
     * CPU_DOWN_PREPARE was added by the following commit
     * 2004 Oct 18: d13d28de21d913aacd3c91e76e307fa2eb7835d8
     *
     * We use one ifdef for both macros since they were added on the same day.
     */
#if defined(CPU_DOWN_FAILED)
    switch (action)
    {
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
            nvidia_cpu_online((NvUPtr)hcpu);
            break;
        case CPU_DOWN_PREPARE:
            nvidia_cpu_teardown((NvUPtr)hcpu);
            break;
    }
#endif
    return NOTIFY_OK;
}
/*
* See NOTE 1.
* In order to avoid warnings for unused variable when compiling against
* kernel versions which include changes of commit id
* b272f732f888d4cf43c943a40c9aaa836f9b7431, we have to protect declaration
* of nv_hotcpu_nfb with #if.
*
* NV_REGISTER_CPU_NOTIFIER_PRESENT is checked before
* NV_CPUHP_SETUP_STATE_PRESENT to avoid compilation warnings for unused
* variable nvidia_pat_online for kernels where both
* NV_REGISTER_CPU_NOTIFIER_PRESENT and NV_CPUHP_SETUP_STATE_PRESENT
* are true.
*/
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
static struct notifier_block nv_hotcpu_nfb = {
.notifier_call = nvidia_cpu_callback,
.priority = 0
};
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
static enum cpuhp_state nvidia_pat_online;
#endif
/*
 * Register for CPU hotplug events so hot-added CPUs get the driver's
 * PAT entry programmed, and departing CPUs get theirs restored.
 *
 * Uses the legacy notifier API when available, otherwise the cpuhp
 * state machine (see NOTE 1). On failure, builtin PAT support is
 * rolled back and -EIO is returned; returns 0 on success.
 */
static int
nvidia_register_cpu_hotplug_notifier(void)
{
    int ret;
    /* See NOTE 1 */
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
    /* register_hotcpu_notifier() returns 0 on success or -ENOENT on failure */
    ret = register_hotcpu_notifier(&nv_hotcpu_nfb);
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
    /*
     * cpuhp_setup_state() returns positive number on success when state is
     * CPUHP_AP_ONLINE_DYN. On failure, it returns a negative number.
     */
    ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                            "nvidia/pat:online",
                            nvidia_cpu_online,
                            nvidia_cpu_teardown);
    if (ret < 0)
    {
        /*
         * If cpuhp_setup_state() fails, the cpuhp_remove_state()
         * should never be called. If it gets called, we might remove
         * some other state. Hence, explicitly set
         * nvidia_pat_online to zero. This will trigger a BUG()
         * in cpuhp_remove_state().
         */
        nvidia_pat_online = 0;
    }
    else
    {
        /* Remember the dynamically-allocated state for removal later. */
        nvidia_pat_online = ret;
    }
#else
    /*
     * This function should be a no-op for kernels which
     * - do not have CONFIG_HOTPLUG_CPU enabled,
     * - do not have PAT support,
     * - do not have the cpuhp_setup_state() function.
     *
     * On such kernels, returning an error here would result in module init
     * failure. Hence, return 0 here.
     */
    if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
    {
        ret = 0;
    }
    else
    {
        ret = -EIO;
    }
#endif
    if (ret < 0)
    {
        /* Roll back the builtin PAT changes before failing. */
        nv_disable_pat_support();
        nv_printf(NV_DBG_ERRORS,
            "NVRM: CPU hotplug notifier registration failed!\n");
        return -EIO;
    }
    return 0;
}
/*
 * Undo nvidia_register_cpu_hotplug_notifier(): remove either the
 * legacy notifier or the dynamically-allocated cpuhp state, whichever
 * variant was registered (see NOTE 1).
 */
static void
nvidia_unregister_cpu_hotplug_notifier(void)
{
    /* See NOTE 1 */
#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU)
    unregister_hotcpu_notifier(&nv_hotcpu_nfb);
#elif defined(NV_CPUHP_SETUP_STATE_PRESENT)
    cpuhp_remove_state(nvidia_pat_online);
#endif
    return;
}
#else /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
/* Builtin PAT support compiled out (CONFIG_X86_PAT): no-op, returns 0. */
static int nv_enable_builtin_pat_support(void)
{
    return 0;
}
/* Builtin PAT support compiled out: nothing to undo. */
static void nv_disable_builtin_pat_support(void)
{
}
/* Builtin PAT support compiled out: no notifier needed, report -EIO. */
static int nvidia_register_cpu_hotplug_notifier(void)
{
    return -EIO;
}
/* Builtin PAT support compiled out: nothing to unregister. */
static void nvidia_unregister_cpu_hotplug_notifier(void)
{
}
#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
/*
 * Decide how the driver can obtain a write-combining PAT entry.
 *
 * Returns one of:
 *   NV_PAT_MODE_KERNEL   - PAT entry 1 already holds WC (0x01): reuse
 *                          the kernel's layout.
 *   NV_PAT_MODE_BUILTIN  - no WC entry found and builtin support is
 *                          compiled in: the driver will install its own.
 *   NV_PAT_MODE_DISABLED - the CPU lacks PAT support, a WC entry exists
 *                          at an index other than 1 (a layout the
 *                          driver does not handle), or builtin support
 *                          is compiled out.
 */
static int nv_determine_pat_mode(void)
{
    unsigned int pat1, pat2, i;
    NvU8 PAT_WC_index;
    if (!test_bit(X86_FEATURE_PAT,
            (volatile unsigned long *)&boot_cpu_data.x86_capability))
    {
        /*
         * The kernel masked the PAT feature flag; still allow Intel
         * family-6 CPUs (model < 15) whose CPUID EDX bit 16 reports
         * PAT support.
         */
        if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) ||
            (boot_cpu_data.cpuid_level < 1) ||
            ((cpuid_edx(1) & (1 << 16)) == 0) ||
            (boot_cpu_data.x86 != 6) || (boot_cpu_data.x86_model >= 15))
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: CPU does not support the PAT.\n");
            return NV_PAT_MODE_DISABLED;
        }
    }
    NV_READ_PAT_ENTRIES(pat1, pat2);
    /* Scan entries 0-3 of both MSR words for WC (0x01); 0xf = not found. */
    PAT_WC_index = 0xf;
    for (i = 0; i < 4; i++)
    {
        if (NV_PAT_ENTRY(pat1, i) == 0x01)
        {
            PAT_WC_index = i;
            break;
        }
        if (NV_PAT_ENTRY(pat2, i) == 0x01)
        {
            PAT_WC_index = (i + 4);
            break;
        }
    }
    if (PAT_WC_index == 1)
    {
        /* Kernel already placed WC at entry 1: inherit its layout. */
        return NV_PAT_MODE_KERNEL;
    }
    else if (PAT_WC_index != 0xf)
    {
        /* WC exists, but at an index the driver does not handle. */
        nv_printf(NV_DBG_ERRORS,
            "NVRM: PAT configuration unsupported.\n");
        return NV_PAT_MODE_DISABLED;
    }
    else
    {
#if NV_ENABLE_BUILTIN_PAT_SUPPORT
        /* No WC entry anywhere: the driver can install one itself. */
        return NV_PAT_MODE_BUILTIN;
#else
        return NV_PAT_MODE_DISABLED;
#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */
    }
}
/*
 * Determine the PAT mode (once) and enable PAT usage accordingly.
 *
 * Returns 1 when a usable WC PAT entry exists or was installed, and 0
 * when the PAT is unavailable or unusable. Subsequent calls after a
 * successful determination return 1 without re-probing.
 */
int nv_enable_pat_support(void)
{
    if (nv_pat_mode != NV_PAT_MODE_DISABLED)
    {
        /* Mode was already determined earlier; nothing more to do. */
        return 1;
    }

    nv_pat_mode = nv_determine_pat_mode();

    if (nv_pat_mode == NV_PAT_MODE_KERNEL)
    {
        /* The kernel's PAT layout already provides a WC entry. */
        return 1;
    }

    if (nv_pat_mode == NV_PAT_MODE_DISABLED)
    {
        /* PAT unavailable or unusable; callers must fall back. */
        return 0;
    }

    /* NV_PAT_MODE_BUILTIN: rewrite the PAT layout ourselves. */
    return nv_enable_builtin_pat_support();
}
/*
 * Disable PAT usage; only has an effect when the driver itself
 * installed the WC entry (builtin mode).
 */
void nv_disable_pat_support(void)
{
    if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
    {
        nv_disable_builtin_pat_support();
    }
}
/*
 * Module-init PAT setup.
 *
 * Consults the NV_USE_PAGE_ATTRIBUTE_TABLE registry key: an explicit
 * value of 0 disables builtin PAT support (a missing key or the ~0
 * sentinel leaves it enabled). When enabled, PAT support is brought up
 * and, if the driver had to install its own entry (builtin mode), a
 * CPU hotplug notifier is registered so hot-added CPUs are programmed
 * too.
 *
 * Returns 0 on success (or when PAT is simply unused), or the negative
 * result of the hotplug-notifier registration.
 */
int nv_init_pat_support(nvidia_stack_t *sp)
{
    NvU32 data;
    NV_STATUS status;
    int pat_requested = 1;

    status = rm_read_registry_dword(sp, NULL,
            NV_USE_PAGE_ATTRIBUTE_TABLE, &data);
    if ((status == NV_OK) && ((int)data != ~0))
    {
        /* Key present and not the "unset" sentinel: 0 means disable. */
        pat_requested = (data != 0);
    }

    if (!pat_requested)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: builtin PAT support disabled.\n");
        return 0;
    }

    nv_enable_pat_support();

    if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
    {
        /* Keep hot-plugged CPUs consistent with the modified PAT. */
        return nvidia_register_cpu_hotplug_notifier();
    }

    return 0;
}
/*
 * Module-exit counterpart of nv_init_pat_support(): restore the
 * original PAT and drop the hotplug notifier, but only if the driver
 * had installed its own entry (builtin mode).
 */
void nv_teardown_pat_support(void)
{
    if (nv_pat_mode != NV_PAT_MODE_BUILTIN)
        return;

    nv_disable_pat_support();
    nvidia_unregister_cpu_hotplug_notifier();
}
#endif /* defined(NV_ENABLE_PAT_SUPPORT) */

View File

@@ -0,0 +1,59 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PAT_H_
#define _NV_PAT_H_
#include "nv-linux.h"
#if defined(NV_ENABLE_PAT_SUPPORT)
extern int nv_init_pat_support(nvidia_stack_t *sp);
extern void nv_teardown_pat_support(void);
extern int nv_enable_pat_support(void);
extern void nv_disable_pat_support(void);
#else
/* PAT support compiled out: report success without touching anything. */
static inline int nv_init_pat_support(nvidia_stack_t *sp)
{
    (void)sp;
    return 0;
}
/* PAT support compiled out: nothing to tear down. */
static inline void nv_teardown_pat_support(void)
{
    return;
}
/* PAT support compiled out: report success (1) to callers. */
static inline int nv_enable_pat_support(void)
{
    return 1;
}
/* PAT support compiled out: nothing to disable. */
static inline void nv_disable_pat_support(void)
{
    return;
}
#endif
#endif /* _NV_PAT_H_ */

View File

@@ -0,0 +1,79 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "nv-pci-table.h"
/*
 * Devices supported by RM: every NVIDIA PCI device whose class is
 * either "VGA-compatible display controller" or "3D controller".
 * (PCI class codes occupy the upper bytes, hence the << 8 shift.)
 */
struct pci_device_id nv_pci_table[] = {
    {
        /* Any NVIDIA device with the VGA display class. */
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_DISPLAY_VGA << 8),
        .class_mask = ~0
    },
    {
        /* Any NVIDIA device with the 3D controller class. */
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_DISPLAY_3D << 8),
        .class_mask = ~0
    },
    { } /* zero-filled terminator required by the PCI core */
};
/*
 * Devices supported by all drivers in nvidia.ko: the RM classes above
 * plus "other bridge" class devices. Exported to module tooling via
 * MODULE_DEVICE_TABLE so udev/modprobe can autoload the module.
 */
struct pci_device_id nv_module_device_table[] = {
    {
        /* Any NVIDIA device with the VGA display class. */
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_DISPLAY_VGA << 8),
        .class_mask = ~0
    },
    {
        /* Any NVIDIA device with the 3D controller class. */
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_DISPLAY_3D << 8),
        .class_mask = ~0
    },
    {
        /* Any NVIDIA device with the "other bridge" class. */
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_BRIDGE_OTHER << 8),
        .class_mask = ~0
    },
    { } /* zero-filled terminator required by the PCI core */
};
MODULE_DEVICE_TABLE(pci, nv_module_device_table);

View File

@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PCI_TABLE_H_
#define _NV_PCI_TABLE_H_
#include <linux/pci.h>
extern struct pci_device_id nv_pci_table[];
#endif /* _NV_PCI_TABLE_H_ */

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,122 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*!
* @brief Unpowergate the display.
*
* Increment the device's usage counter, run pm_request_resume(dev)
* and return its result.
*
* For more details on runtime pm functions, please check the below
* files in the Linux kernel:
*
* include/linux/pm_runtime.h
* include/linux/pm.h
* or
* https://www.kernel.org/doc/Documentation/power/runtime_pm.txt
*
* pm_request_resume() submits a request to execute the subsystem-level
* resume callback for the device (the request is represented by a work
* item in pm_wq); returns 0 on success, 1 if the device's runtime PM
* status was already 'active', or error code if the request hasn't
* been queued up.
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_soc_pm_unpowergate(
    nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvS32 rc;

    /* Bump the usage counter and queue an asynchronous resume request. */
    rc = pm_runtime_get(nvl->dev);

    switch (rc)
    {
        case 1:
            /* Runtime-PM status was already 'active'. */
            nv_printf(NV_DBG_INFO, "NVRM: device was already unpowergated\n");
            break;

        case -EINPROGRESS:
            /*
             * pm_runtime_get() internally calls __pm_runtime_resume(...RPM_ASYNC)
             * which internally calls rpm_resume() and this function will throw
             * "-EINPROGRESS" if it is being called when device state is
             * RPM_RESUMING and RPM_ASYNC or RPM_NOWAIT is set.
             */
            nv_printf(NV_DBG_INFO, "NVRM: device is already unpowergating\n");
            break;

        default:
            if (rc < 0)
            {
                /* Request could not be queued; report failure. */
                nv_printf(NV_DBG_ERRORS, "NVRM: unpowergate unsuccessful. ret: %d\n", rc);
                return NV_ERR_GENERIC;
            }
            break;
    }

    return NV_OK;
}
/*!
* @brief Powergate the display.
*
* Decrement the device's usage counter; if the result is 0 then run
* pm_request_idle(dev) and return its result.
*
* For more details on runtime pm functions, please check the below
* files in the Linux kernel:
*
* include/linux/pm_runtime.h
* include/linux/pm.h
* or
* https://www.kernel.org/doc/Documentation/power/runtime_pm.txt
*
* @param[in] nv Per gpu linux state
*
* @returns NV_STATUS
*/
NV_STATUS NV_API_CALL nv_soc_pm_powergate(
    nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvS32 rc;

    /* Drop our usage reference; an idle request follows when it hits 0. */
    rc = pm_runtime_put(nvl->dev);
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: powergate unsuccessful. ret: %d\n", rc);
        return NV_ERR_GENERIC;
    }

    return NV_OK;
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,47 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if defined(CONFIG_PROC_FS)
#include "nv-procfs-utils.h"
/*
 * Remove a procfs subtree rooted at "entry".
 *
 * When the kernel provides proc_remove(), the entire subtree is removed
 * in a single call and "delimiter" is unused. Otherwise the sibling
 * list is walked manually: each subdirectory is removed recursively,
 * each entry is unregistered from its parent, and the walk stops once
 * "delimiter" itself has been removed.
 */
void
nv_procfs_unregister_all(struct proc_dir_entry *entry, struct proc_dir_entry *delimiter)
{
#if defined(NV_PROC_REMOVE_PRESENT)
    proc_remove(entry);
#else
    while (entry)
    {
        /* Save the sibling pointer before this entry is freed. */
        struct proc_dir_entry *next = entry->next;
        if (entry->subdir)
            nv_procfs_unregister_all(entry->subdir, delimiter);
        remove_proc_entry(entry->name, entry->parent);
        /* Stop after removing the delimiter entry. */
        if (entry == delimiter)
            break;
        entry = next;
    }
#endif
}
#endif

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,956 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RM_REG_H_
#define _RM_REG_H_
#include "nvtypes.h"
/*
* use NV_REG_STRING to stringify a registry key when using that registry key
*/
#define __NV_REG_STRING(regkey) #regkey
#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey)
/*
* use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify definition
* of registry keys in the kernel module source code.
*/
#define __NV_REG_VAR(regkey) NVreg_##regkey
#if defined(NV_MODULE_PARAMETER)
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
static NvU32 __NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
NvU32 __NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_ENTRY(regkey, default_value) \
static NvU32 __NV_REG_VAR(regkey) = (default_value)
#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \
NvU32 __NV_REG_VAR(regkey) = (default_value)
#endif
#if defined(NV_MODULE_STRING_PARAMETER)
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
char *__NV_REG_VAR(regkey) = (default_value); \
NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey))
#else
#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \
char *__NV_REG_VAR(regkey) = (default_value)
#endif
#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \
{ NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) }
/*
* Like NV_DEFINE_PARMS_TABLE_ENTRY, but allows a mismatch between the name of
* the regkey and the name of the module parameter. When using this macro, the
* name of the parameter is passed to the extra "parameter" argument, and it is
* this name that must be used in the NV_DEFINE_REG_ENTRY() macro.
*/
#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \
{ NV_REG_STRING(regkey), &__NV_REG_VAR(parameter)}
/*
*----------------- registry key definitions--------------------------
*/
/*
* Option: ModifyDeviceFiles
*
* Description:
*
* When this option is enabled, the NVIDIA driver will verify the validity
* of the NVIDIA device files in /dev and attempt to dynamically modify
* and/or (re-)create them, if necessary. If you don't wish for the NVIDIA
* driver to touch the device files, you can use this registry key.
*
* This module parameter is only honored by the NVIDIA GPU driver and NVIDIA
* capability driver. Furthermore, the NVIDIA capability driver provides
* modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of
* this module parameter per device file.
*
* Possible Values:
* 0 = disable dynamic device file management
* 1 = enable dynamic device file management (default)
*/
#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles
#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES)
/*
* Option: DeviceFileUID
*
* Description:
*
* This registry key specifies the UID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default UID is 0 ('root').
*/
#define __NV_DEVICE_FILE_UID DeviceFileUID
#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID)
/*
* Option: DeviceFileGID
*
* Description:
*
* This registry key specifies the GID assigned to the NVIDIA device files
* created and/or modified by the NVIDIA driver when dynamic device file
* management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default GID is 0 ('root').
*/
#define __NV_DEVICE_FILE_GID DeviceFileGID
#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID)
/*
* Option: DeviceFileMode
*
* Description:
*
* This registry key specifies the device file mode assigned to the NVIDIA
* device files created and/or modified by the NVIDIA driver when dynamic
* device file management is enabled.
*
* This module parameter is only honored by the NVIDIA GPU driver.
*
* The default mode is 0666 (octal, rw-rw-rw-).
*/
#define __NV_DEVICE_FILE_MODE DeviceFileMode
#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE)
/*
* Option: ResmanDebugLevel
*
* Default value: ~0
*/
#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel
#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL)
/*
* Option: RmLogonRC
*
* Default value: 1
*/
#define __NV_RM_LOGON_RC RmLogonRC
#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC)
/*
* Option: InitializeSystemMemoryAllocations
*
* Description:
*
* The NVIDIA Linux driver normally clears system memory it allocates
* for use with GPUs or within the driver stack. This is to ensure
* that potentially sensitive data is not rendered accessible by
* arbitrary user applications.
*
* Owners of single-user systems or similar trusted configurations may
* choose to disable the aforementioned clears using this option and
* potentially improve performance.
*
* Possible values:
*
* 1 = zero out system memory allocations (default)
* 0 = do not perform memory clears
*/
#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
InitializeSystemMemoryAllocations
#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS)
/*
* Option: RegistryDwords
*
* Description:
*
* This option accepts a semicolon-separated list of key=value pairs. Each
* key name is checked against the table of static options; if a match is
* found, the static option value is overridden, but invalid options remain
* invalid. Pairs that do not match an entry in the static option table
* are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwords="<key=value>;<key=value>;..."
*/
#define __NV_REGISTRY_DWORDS RegistryDwords
#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS)
/*
* Option: RegistryDwordsPerDevice
*
* Description:
*
 * This option allows specifying registry keys per GPU device. It helps to
* control registry at GPU level of granularity. It accepts a semicolon
* separated list of key=value pairs. The first key value pair MUST be
* "pci=DDDD:BB:DD.F;" where DDDD is Domain, BB is Bus Id, DD is device slot
* number and F is the Function. This PCI BDF is used to identify which GPU to
* assign the registry keys that follows next.
* If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
 * found, then all the registry keys that follow are skipped until the next
 * valid pci identifier "pci=DDDD:BB:DD.F;" is found. Following are the valid formats for
* the value of the "pci" string:
* 1) bus:slot : Domain and function defaults to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI dev id string.
*
* For each of the registry keys that follows, key name is checked against the
* table of static options; if a match is found, the static option value is
* overridden, but invalid options remain invalid. Pairs that do not match an
* entry in the static option table are passed on to the RM directly.
*
* Format:
*
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \
* pci=DDDD:BB:DD.F;<key=value>;..;"
*/
#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice
#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE)
#define __NV_RM_MSG RmMsg
#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG)
/*
* Option: UsePageAttributeTable
*
* Description:
*
* Enable/disable use of the page attribute table (PAT) available in
* modern x86/x86-64 processors to set the effective memory type of memory
* mappings to write-combining (WC).
*
* If enabled, an x86 processor with PAT support is present and the host
* system's Linux kernel did not configure one of the PAT entries to
* indicate the WC memory type, the driver will change the second entry in
* the PAT from its default (write-through (WT)) to WC at module load
* time. If the kernel did update one of the PAT entries, the driver will
* not modify the PAT.
*
* In both cases, the driver will honor attempts to map memory with the WC
* memory type by selecting the appropriate PAT entry using the correct
* set of PTE flags.
*
* Possible values:
*
* ~0 = use the NVIDIA driver's default logic (default)
* 1 = enable use of the PAT for WC mappings.
* 0 = disable use of the PAT for WC mappings.
*/
#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable
#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE)
/*
* Option: EnableMSI
*
* Description:
*
* When this option is enabled and the host kernel supports the MSI feature,
* the NVIDIA driver will enable the PCI-E MSI capability of GPUs with the
* support for this feature instead of using PCI-E wired interrupt.
*
* Possible Values:
*
* 0 = disable MSI interrupt
* 1 = enable MSI interrupt (default)
*
*/
#define __NV_ENABLE_MSI EnableMSI
#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI)
/*
* Option: RegisterForACPIEvents
*
* Description:
*
* When this option is enabled, the NVIDIA driver will register with the
* ACPI subsystem to receive notification of ACPI events.
*
* Possible values:
*
* 1 - register for ACPI events (default)
* 0 - do not register for ACPI events
*/
#define __NV_REGISTER_FOR_ACPI_EVENTS RegisterForACPIEvents
#define NV_REG_REGISTER_FOR_ACPI_EVENTS NV_REG_STRING(__NV_REGISTER_FOR_ACPI_EVENTS)
/*
* Option: EnablePCIeGen3
*
* Description:
*
* Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs
* when configured on SandyBridge E desktop platforms, NVIDIA feels that
* delivering a reliable, high-quality experience is not currently possible in
* PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and
* NVS Kepler products operate in PCIe Gen2 mode by default. You may use this
* option to enable PCIe Gen3 support.
*
* This is completely unsupported!
*
* Possible Values:
*
* 0: disable PCIe Gen3 support (default)
* 1: enable PCIe Gen3 support
*/
#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3
#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3)
/*
* Option: MemoryPoolSize
*
* Description:
*
* When set to a non-zero value, this option specifies the size of the
* memory pool, given as a multiple of 1 GB, created on VMware ESXi to
* satisfy any system memory allocations requested by the NVIDIA kernel
* module.
*/
#define __NV_MEMORY_POOL_SIZE MemoryPoolSize
#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE)
/*
* Option: KMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size of the
* heap memory space reserved for kmalloc operations. Given as a
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
* allocations requested by the NVIDIA kernel module.
*/
#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize
#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE)
/*
* Option: VMallocHeapMaxSize
*
* Description:
*
* When set to a non-zero value, this option specifies the maximum size of the
* heap memory space reserved for vmalloc operations. Given as a
* multiple of 1 MB created on VMware ESXi to satisfy any system memory
* allocations requested by the NVIDIA kernel module.
*/
#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize
#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE)
/*
* Option: IgnoreMMIOCheck
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will ignore
* MMIO limit check during device probe on VMWare ESXi kernel. This is
* typically necessary when VMware ESXi MMIO limit differs between any
* base version and its updates. Customer using updates can set regkey
* to avoid probe failure.
*/
#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck
#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK)
/*
* Option: TCEBypassMode
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will attempt to setup
* all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass
* the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically
* necessary for CUDA applications in which large system memory mappings may
* exceed the default TCE remapping capacity when operated in non-bypass mode.
*
* This option has no effect on non-POWER platforms.
*
* Possible Values:
*
* 0: system default TCE mode on all GPUs
* 1: enable TCE bypass mode on all GPUs
* 2: disable TCE bypass mode on all GPUs
*/
#define __NV_TCE_BYPASS_MODE TCEBypassMode
#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE)
#define NV_TCE_BYPASS_MODE_DEFAULT 0
#define NV_TCE_BYPASS_MODE_ENABLE 1
#define NV_TCE_BYPASS_MODE_DISABLE 2
/*
* Option: pci
*
* Description:
*
* On Unix platforms, per GPU based registry key can be specified as:
* NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F,<per-gpu registry keys>".
* where DDDD:BB:DD.F refers to Domain:Bus:Device.Function.
* We need this key "pci" to identify what follows next is a PCI BDF identifier,
* for which the registry keys are to be applied.
*
* This define is not used on non-UNIX platforms.
*
* Possible Formats for value:
*
* 1) bus:slot : Domain and function defaults to 0.
* 2) domain:bus:slot : Function defaults to 0.
* 3) domain:bus:slot.func : Complete PCI BDF identifier string.
*/
#define __NV_PCI_DEVICE_BDF pci
#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
/*
* Option: EnableStreamMemOPs
*
* Description:
*
* When this option is enabled, the CUDA driver will enable support for
* CUDA Stream Memory Operations in user-mode applications, which are so
* far required to be disabled by default due to limited support in
* devtools.
*
* Note: this is treated as a hint. MemOPs may still be left disabled by CUDA
* driver for other reasons.
*
* Possible Values:
*
* 0 = disable feature (default)
* 1 = enable feature
*/
#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
/*
* Option: EnableUserNUMAManagement
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will require the
* user-mode NVIDIA Persistence daemon to manage the onlining and offlining
* of its NUMA device memory.
*
* This option has no effect on platforms that do not support onlining
* device memory to a NUMA node (this feature is only supported on certain
* POWER9 systems).
*
* Possible Values:
*
* 0: disable user-mode NUMA management
* 1: enable user-mode NUMA management (default)
*/
#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
/*
* Option: GpuBlacklist
*
* Description:
*
* This option accepts a list of blacklisted GPUs, separated by commas, that
* cannot be attached or used. Each blacklisted GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs. This regkey is deprecated and will be removed in the future. Use
* NV_REG_EXCLUDED_GPUS instead.
*/
#define __NV_GPU_BLACKLIST GpuBlacklist
#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
/*
* Option: ExcludedGpus
*
* Description:
*
* This option accepts a list of excluded GPUs, separated by commas, that
* cannot be attached or used. Each excluded GPU is identified by a UUID in
* the ASCII format with leading "GPU-". An exact match is required; no partial
* UUIDs.
*/
#define __NV_EXCLUDED_GPUS ExcludedGpus
#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
/*
* Option: NvLinkDisable
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will not attempt to
* initialize or train NVLink connections for any GPUs. System reboot is required
 * for changes to take effect.
*
* This option has no effect if no GPUs support NVLink.
*
* Possible Values:
*
* 0: Do not disable NVLink (default)
* 1: Disable NVLink
*/
#define __NV_NVLINK_DISABLE NvLinkDisable
#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
/*
* Option: RestrictProfilingToAdminUsers
*
* Description:
*
* When this option is enabled, the NVIDIA kernel module will prevent users
* without administrative access (i.e., the CAP_SYS_ADMIN capability) from
* using GPU performance counters.
*
* Possible Values:
*
* 0: Do not restrict GPU counters (default)
* 1: Restrict GPU counters to system administrators only
*/
#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly
#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers
#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY)
/*
* Option: TemporaryFilePath
*
* Description:
*
* When specified, this option changes the location in which the
* NVIDIA kernel module will create unnamed temporary files (e.g. to
* save the contents of video memory in). The indicated file must
* be a directory. By default, temporary files are created in /tmp.
*/
#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath
#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH)
/*
* Option: PreserveVideoMemoryAllocations
*
* If enabled, this option prompts the NVIDIA kernel module to save and
* restore all video memory allocations across system power management
* cycles, i.e. suspend/resume and hibernate/restore. Otherwise,
* only select allocations are preserved.
*
* Possible Values:
*
* 0: Preserve only select video memory allocations (default)
* 1: Preserve all video memory allocations
*/
#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations
#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \
NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS)
/*
* Option: EnableS0ixPowerManagement
*
* When this option is enabled, the NVIDIA driver will use S0ix-based
* power management for system suspend/resume, if both the platform and
* the GPU support S0ix.
*
* During system suspend, if S0ix is enabled and
* video memory usage is above the threshold configured by
* 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept
* in self-refresh mode while the rest of the GPU is powered down.
*
* Otherwise, the driver will copy video memory contents to system memory
* and power off the video memory along with the GPU.
*
* Possible Values:
*
* 0: Disable S0ix based power management (default)
* 1: Enable S0ix based power management
*/
#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement
#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \
NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT)
/*
* Option: S0ixPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use during
* S0ix-based system power management.
*
* When S0ix is enabled and the system is suspended, the driver will
* compare the amount of video memory in use with this threshold,
* to decide whether to keep video memory in self-refresh or copy video
* memory content to system memory.
*
* See the 'EnableS0ixPowerManagement' option.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* Default value for this option is 256MB.
*
*/
#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
S0ixPowerManagementVideoMemoryThreshold
#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: DynamicPowerManagement
*
* This option controls how aggressively the NVIDIA kernel module will manage
* GPU power through kernel interfaces.
*
* Possible Values:
*
 * 0: Never allow the GPU to be powered down.
* 1: Power down the GPU when it is not initialized.
* 2: Power down the GPU after it has been inactive for some time.
* 3: (Default) Power down the GPU after a period of inactivity (i.e.,
* mode 2) on Ampere or later notebooks. Otherwise, do not power down
* the GPU.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement
#define NV_REG_DYNAMIC_POWER_MANAGEMENT \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT)
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3
/*
* Option: DynamicPowerManagementVideoMemoryThreshold
*
* This option controls the threshold that the NVIDIA driver will use
* when selecting the dynamic power management scheme.
*
* When the driver detects that the GPU is idle, it will compare the amount
* of video memory in use with this threshold.
*
* If the current video memory usage is less than the threshold, the
* driver may preserve video memory contents in system memory and power off
* the video memory along with the GPU itself, if supported. Otherwise,
* the video memory will be kept in self-refresh mode while powering down
* the rest of the GPU, if supported.
*
* Values are expressed in Megabytes (1048576 bytes).
*
* If the requested value is greater than 200MB (the default), then it
* will be capped to 200MB.
*/
#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
DynamicPowerManagementVideoMemoryThreshold
#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \
NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD)
/*
* Option: RegisterPCIDriver
*
* Description:
*
* When this option is enabled, the NVIDIA driver will register with
* PCI subsystem.
*
* Possible values:
*
* 1 - register as PCI driver (default)
* 0 - do not register as PCI driver
*/
#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver
#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER)
/*
* Option: EnablePCIERelaxedOrderingMode
*
* Description:
*
* When this option is enabled, the registry key RmSetPCIERelaxedOrdering will
* be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing
* every device to set the relaxed ordering bit to 1 in all outbound MWr
* transaction-layer packets. This is equivalent to setting the regkey to
* FORCE_ENABLE as a non-per-device registry key.
*
* Possible values:
* 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default)
* 1 - Enable PCIe TLP relaxed ordering bit-setting
*/
#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode
#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \
NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE)
/*
* Option: EnableGpuFirmware
*
* Description:
*
* When this option is enabled, the NVIDIA driver will enable use of GPU
* firmware.
*
* Possible mode values:
* 0 - Do not enable GPU firmware
* 1 - Enable GPU firmware
* 2 - (Default) Use the default enablement policy for GPU firmware
*
* Setting this to anything other than 2 will alter driver firmware-
* enablement policies, possibly disabling GPU firmware where it would
* have otherwise been enabled by default.
*
* If this key is set globally to the system, the driver may still attempt
* to apply some policies to maintain uniform firmware modes across all
* GPUS. This may result in the driver failing initialization on some GPUs
* to maintain such a policy.
*
* If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
* will attempt to honor whatever configuration is specified without applying
 * additional policies. This may also result in failed GPU initializations if
* the configuration is not possible (for example if the firmware is missing
* from the filesystem, or the GPU is not capable).
*
* Policy bits:
*
* POLICY_ALLOW_FALLBACK:
* As the normal behavior is to fail GPU initialization if this registry
* entry is set in such a way that results in an invalid configuration, if
* instead the user would like the driver to automatically try to fallback
* to initializing the failing GPU with firmware disabled, then this bit can
* be set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
* Note that this can result in a mixed mode configuration (ex: GPU0 has
* firmware enabled, but GPU1 does not).
*
*/
#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE 0xFFFFFFFF
/*
* Option: EnableGpuFirmwareLogs
*
* When this option is enabled, the NVIDIA driver will send GPU firmware logs
* to the system log, when possible.
*
* Possible values:
* 0 - Do not send GPU firmware logs to the system log
* 1 - Enable sending of GPU firmware logs to the system log
* 2 - (Default) Enable sending of GPU firmware logs to the system log for
* the debug kernel driver build only
*/
#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
/*
* Option: EnableDbgBreakpoint
*
* When this option is set to a non-zero value, and the kernel is configured
* appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
* INT3 on x86_64), assumed to be caught by an attached debugger.
*
* When this option is set to the value zero (the default), assertions within
* resman will print to the system log, but no CPU breakpoint will be triggered.
*/
#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
/*
* Option: OpenRmEnableUnsupportedGpus
*
* Open nvidia.ko support for features beyond what is used on Data Center GPUs
* is still fairly immature, so for now require users to opt into use of open
* nvidia.ko with a special registry key, if not on a Data Center GPU.
*/
#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS)
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001
#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE
/*
* Option: NVreg_DmaRemapPeerMmio
*
* Description:
*
* When this option is enabled, the NVIDIA driver will use device driver
* APIs provided by the Linux kernel for DMA-remapping part of a device's
* MMIO region to another device, creating e.g., IOMMU mappings as necessary.
* When this option is disabled, the NVIDIA driver will instead only apply a
* fixed offset, which may be zero, to CPU physical addresses to produce the
* DMA address for the peer's MMIO region, and no IOMMU mappings will be
* created.
*
* This option only affects peer MMIO DMA mappings, and not system memory
* mappings.
*
* Possible Values:
* 0 = disable dynamic DMA remapping of peer MMIO regions
* 1 = enable dynamic DMA remapping of peer MMIO regions (default)
*/
#define __NV_DMA_REMAP_PEER_MMIO DmaRemapPeerMmio
#define NV_DMA_REMAP_PEER_MMIO NV_REG_STRING(__NV_DMA_REMAP_PEER_MMIO)
#define NV_DMA_REMAP_PEER_MMIO_DISABLE 0x00000000
#define NV_DMA_REMAP_PEER_MMIO_ENABLE 0x00000001
#if defined(NV_DEFINE_REGISTRY_KEY_TABLE)
/*
*---------registry key parameter declarations--------------
*/
NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0);
NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0);
NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666);
NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1);
NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0);
NV_DEFINE_REG_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS, 1);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1);
NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0);
NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1);
NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0);
NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3);
NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE);
NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG);
NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 0);
NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL);
NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL);
NV_DEFINE_REG_ENTRY(__NV_DMA_REMAP_PEER_MMIO, NV_DMA_REMAP_PEER_MMIO_ENABLE);
/*
*----------------registry database definition----------------------
*/
/*
* You can enable any of the registry options disabled by default by
* editing their respective entries in the table below. The last field
* determines if the option is considered valid - in order for the
* changes to take effect, you need to recompile and reload the NVIDIA
* kernel module.
*/
nv_parm_t nv_parms[] = {
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE),
NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY,
__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS),
NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DMA_REMAP_PEER_MMIO),
{NULL, NULL}
};
#elif defined(NVRM)
extern nv_parm_t nv_parms[];
#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */
#endif /* _RM_REG_H_ */

View File

@@ -0,0 +1,89 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "nv-linux.h"
#include "os-interface.h"
#include "nv-report-err.h"
nv_report_error_cb_t nv_error_cb_handle = NULL;
int nv_register_error_cb(nv_report_error_cb_t report_error_cb)
{
if (report_error_cb == NULL)
return -EINVAL;
if (nv_error_cb_handle != NULL)
return -EBUSY;
nv_error_cb_handle = report_error_cb;
return 0;
}
EXPORT_SYMBOL(nv_register_error_cb);
/*
 * Drop the registered XID error-report callback.
 *
 * Returns 0 on success, or -EPERM when no callback was registered.
 */
int nv_unregister_error_cb(void)
{
    if (nv_error_cb_handle != NULL)
    {
        nv_error_cb_handle = NULL;
        return 0;
    }

    return -EPERM;
}
EXPORT_SYMBOL(nv_unregister_error_cb);
struct pci_dev;

/*
 * nv_report_error - format an XID error message and forward it to the
 * registered callback, if any.
 *
 * dev:          PCI device the error is reported against.
 * error_number: XID error number.
 * format, ap:   printf-style message describing the error.
 *
 * The formatted string, including its NUL terminator, is handed to the
 * callback along with its total size (strlen + 1). The report is silently
 * dropped when no callback is registered, the message formats to zero
 * length or an error, or the buffer allocation fails.
 */
void nv_report_error(
    struct pci_dev *dev,
    NvU32 error_number,
    const char *format,
    va_list ap
)
{
    va_list ap_copy;
    char *buffer;
    int length = 0;
    int status = NV_OK;

    if (nv_error_cb_handle != NULL)
    {
        /*
         * Measure the formatted length with the copy: a va_list is consumed
         * by vsnprintf(), so 'ap' itself must stay untouched for the real
         * formatting pass below. (The original code measured with 'ap' and
         * then reused it, which is undefined behavior.)
         */
        va_copy(ap_copy, ap);
        length = vsnprintf(NULL, 0, format, ap_copy);
        va_end(ap_copy);

        if (length > 0)
        {
            status = os_alloc_mem((void **)&buffer, (length + 1) * sizeof(char));
            if (status == NV_OK)
            {
                /* Size must be length + 1 so the final character and the
                 * NUL terminator both fit (length alone truncates). */
                vsnprintf(buffer, length + 1, format, ap);
                nv_error_cb_handle(dev, error_number, buffer, length + 1);
                os_free_mem(buffer);
            }
        }
    }
}

View File

@@ -0,0 +1,66 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_REPORT_ERR_H_
#define _NV_REPORT_ERR_H_
/*
 * @brief
 *   Callback type invoked to deliver an XID error string and data.
 *
 * @param[in] pci_dev *
 *   Structure describing the GPU PCI device the error applies to.
 * @param[in] uint32_t
 *   XID error number.
 * @param[in] char *
 *   Error string with HWERR info (NUL-terminated).
 * @param[in] int
 *   Length of the error string buffer, including the NUL terminator.
 */
typedef void (*nv_report_error_cb_t)(struct pci_dev *, uint32_t, char *, int);
/*
 * @brief
 *   Register a callback function to obtain XID error strings and data.
 *   Only one callback may be registered at a time.
 *
 * @param[in] report_error_cb
 *   A function pointer that will receive error-report callbacks.
 *
 * @return
 *   0 upon successful completion.
 *   -EINVAL callback handle is NULL.
 *   -EBUSY callback handle is already registered.
 */
int nv_register_error_cb(nv_report_error_cb_t report_error_cb);
/*
 * @brief
 *   Unregisters the previously registered callback function handle.
 *
 * @return
 *   0 upon successful completion.
 *   -EPERM unregister not permitted on NULL callback handle.
 */
int nv_unregister_error_cb(void);
#endif /* _NV_REPORT_ERR_H_ */

View File

@@ -0,0 +1,201 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nv-linux.h"
#include "nv-rsync.h"
nv_rsync_info_t g_rsync_info;
/*
 * Put the module-global rsync bookkeeping into its initial, unregistered
 * state and prepare the mutex that guards it.
 */
void nv_init_rsync_info(
    void
)
{
    NV_INIT_MUTEX(&g_rsync_info.lock);
    g_rsync_info.data = NULL;
    g_rsync_info.usage_count = 0;
    g_rsync_info.relaxed_ordering_mode = NV_FALSE;
}
/*
 * Sanity-check that the rsync state was fully torn down before it is
 * discarded: no leftover driver cookie, no outstanding references, and
 * relaxed ordering no longer latched on.
 */
void nv_destroy_rsync_info(
    void
)
{
    WARN_ON(g_rsync_info.data);
    WARN_ON(g_rsync_info.usage_count);
    WARN_ON(g_rsync_info.relaxed_ordering_mode);
}
/*
 * Take a reference on the rsync module state.
 *
 * On the 0 -> 1 transition, query the registered rsync driver (if any)
 * for the current relaxed ordering mode and latch it into
 * g_rsync_info.relaxed_ordering_mode; the value then stays fixed while
 * any reference is held, which is what permits lock-free reads later.
 *
 * @return 0 on success; otherwise the driver callback's error code, in
 *         which case no reference is taken.
 */
int nv_get_rsync_info(
    void
)
{
    int mode;
    int rc = 0;

    down(&g_rsync_info.lock);

    /* Only the first user queries the driver. */
    if (g_rsync_info.usage_count == 0)
    {
        if (g_rsync_info.get_relaxed_ordering_mode)
        {
            rc = g_rsync_info.get_relaxed_ordering_mode(&mode,
                                                        g_rsync_info.data);
            if (rc != 0)
            {
                /* Failure: leave usage_count unchanged. */
                goto done;
            }
            /* Normalize the driver's int to a strict 0/1 boolean. */
            g_rsync_info.relaxed_ordering_mode = !!mode;
        }
    }
    g_rsync_info.usage_count++;

done:
    up(&g_rsync_info.lock);
    return rc;
}
/*
 * Drop a reference taken by nv_get_rsync_info().  When the last
 * reference goes away, hand the latched relaxed ordering mode back to
 * the rsync driver (if one is registered) and clear the cached value.
 */
void nv_put_rsync_info(
    void
)
{
    int mode;

    down(&g_rsync_info.lock);

    g_rsync_info.usage_count--;
    if (g_rsync_info.usage_count == 0)
    {
        if (g_rsync_info.put_relaxed_ordering_mode)
        {
            mode = g_rsync_info.relaxed_ordering_mode;
            g_rsync_info.put_relaxed_ordering_mode(mode, g_rsync_info.data);
            g_rsync_info.relaxed_ordering_mode = NV_FALSE;
        }
    }

    up(&g_rsync_info.lock);
}
/*
 * Register an external rsync driver's callbacks and private cookie.
 *
 * @return 0 on success, or -EBUSY when a driver is already registered or
 *         the rsync state currently has outstanding references.
 */
int nv_register_rsync_driver(
    int (*get_relaxed_ordering_mode)(int *mode, void *data),
    void (*put_relaxed_ordering_mode)(int mode, void *data),
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
    void *data
)
{
    int status = 0;

    down(&g_rsync_info.lock);

    /* Reject if a driver is already in place or the state is in use. */
    if ((g_rsync_info.get_relaxed_ordering_mode != NULL) ||
        (g_rsync_info.usage_count != 0))
    {
        status = -EBUSY;
    }
    else
    {
        g_rsync_info.get_relaxed_ordering_mode = get_relaxed_ordering_mode;
        g_rsync_info.put_relaxed_ordering_mode = put_relaxed_ordering_mode;
        g_rsync_info.wait_for_rsync = wait_for_rsync;
        g_rsync_info.data = data;
    }

    up(&g_rsync_info.lock);
    return status;
}
/*
 * Unregister a previously registered rsync driver.  WARNs (but still
 * proceeds) if references are outstanding or if the supplied callbacks
 * and cookie do not match what was registered, then clears all
 * registration state unconditionally.
 */
void nv_unregister_rsync_driver(
    int (*get_relaxed_ordering_mode)(int *mode, void *data),
    void (*put_relaxed_ordering_mode)(int mode, void *data),
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
    void *data
)
{
    down(&g_rsync_info.lock);

    WARN_ON(g_rsync_info.usage_count != 0);
    WARN_ON(g_rsync_info.get_relaxed_ordering_mode !=
            get_relaxed_ordering_mode);
    WARN_ON(g_rsync_info.put_relaxed_ordering_mode !=
            put_relaxed_ordering_mode);
    WARN_ON(g_rsync_info.wait_for_rsync != wait_for_rsync);
    WARN_ON(g_rsync_info.data != data);

    g_rsync_info.get_relaxed_ordering_mode = NULL;
    g_rsync_info.put_relaxed_ordering_mode = NULL;
    g_rsync_info.wait_for_rsync = NULL;
    g_rsync_info.data = NULL;

    up(&g_rsync_info.lock);
}
/*
 * Return the relaxed ordering mode latched by nv_get_rsync_info().
 * Callers must hold the device open (which holds an rsync reference);
 * that is what makes the lock-free read below safe.
 */
NvBool nv_get_rsync_relaxed_ordering_mode(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    /* shouldn't be called without opening a device */
    WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0);

    /*
     * g_rsync_info.relaxed_ordering_mode can be safely accessed outside of
     * g_rsync_info.lock once a device is opened. During nvidia_open(), we
     * lock the relaxed ordering state by ref-counting the rsync module
     * through get_relaxed_ordering_mode.
     */
    return g_rsync_info.relaxed_ordering_mode;
}
/*
 * If relaxed ordering is active, invoke the registered rsync driver's
 * wait callback for this GPU.  Callers must hold the device open, which
 * pins the driver registration (see comment below).
 */
void nv_wait_for_rsync(
    nv_state_t *nv
)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    /* shouldn't be called without opening a device */
    WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0);

    /*
     * g_rsync_info.relaxed_ordering_mode can be safely accessed outside of
     * g_rsync_info.lock once a device is opened. During nvidia_open(), we
     * block unregistration of the rsync driver by ref-counting the module
     * through get_relaxed_ordering_mode.
     */
    if (g_rsync_info.relaxed_ordering_mode)
    {
        /* Relaxed ordering mode implies a registered wait hook. */
        WARN_ON(g_rsync_info.wait_for_rsync == NULL);
        g_rsync_info.wait_for_rsync(nvl->pci_dev, g_rsync_info.data);
    }
}

View File

@@ -0,0 +1,57 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_RSYNC_H_
#define _NV_RSYNC_H_

#include "nv-linux.h"

/*
 * Module-global bookkeeping for an externally registered "rsync" driver.
 * All fields are protected by 'lock', except that relaxed_ordering_mode
 * is latched on the first reference and may be read lock-free while
 * usage_count is non-zero (see nv-rsync.c).
 */
typedef struct nv_rsync_info
{
    struct semaphore lock;        /* guards the fields below */
    uint32_t usage_count;         /* number of active references */
    NvBool relaxed_ordering_mode; /* mode latched from the driver */
    int (*get_relaxed_ordering_mode)(int *mode, void *data);
    void (*put_relaxed_ordering_mode)(int mode, void *data);
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data);
    void *data;                   /* driver-private cookie */
} nv_rsync_info_t;

void nv_init_rsync_info(void);
void nv_destroy_rsync_info(void);
int nv_get_rsync_info(void);
void nv_put_rsync_info(void);
int nv_register_rsync_driver(
    int (*get_relaxed_ordering_mode)(int *mode, void *data),
    void (*put_relaxed_ordering_mode)(int mode, void *data),
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
    void *data);
void nv_unregister_rsync_driver(
    int (*get_relaxed_ordering_mode)(int *mode, void *data),
    void (*put_relaxed_ordering_mode)(int mode, void *data),
    void (*wait_for_rsync)(struct pci_dev *gpu, void *data),
    void *data);
NvBool nv_get_rsync_relaxed_ordering_mode(nv_state_t *nv);
void nv_wait_for_rsync(nv_state_t *nv);

#endif

View File

@@ -0,0 +1,161 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-frontend.h"
/*
 * Stash the parameters of a pending mapping in the per-fd private data
 * so that a subsequent mmap() on that fd can be validated and completed.
 *
 * Control device: only the allocation private and page index are saved.
 * GPU device: the fd must have been opened against this same device; the
 * mmap/access ranges from nvuap are saved, plus the page array when the
 * memory is NUMA-online.
 *
 * @return NV_OK on success,
 *         NV_ERR_INVALID_ARGUMENT for a bad fd or device mismatch,
 *         NV_ERR_STATE_IN_USE when a context is already pending.
 */
NV_STATUS NV_API_CALL nv_add_mapping_context_to_file(
    nv_state_t *nv,
    nv_usermap_access_params_t *nvuap,
    NvU32 prot,
    void *pAllocPriv,
    NvU64 pageIndex,
    NvU32 fd
)
{
    NV_STATUS status = NV_OK;
    nv_alloc_mapping_context_t *nvamc = NULL;
    nv_file_private_t *nvfp = NULL;
    nv_linux_file_private_t *nvlfp = NULL;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    void *priv = NULL;

    /* Look up (and pin) the private data attached to the target fd. */
    nvfp = nv_get_file_private(fd, NV_IS_CTL_DEVICE(nv), &priv);
    if (nvfp == NULL)
        return NV_ERR_INVALID_ARGUMENT;

    nvlfp = nv_get_nvlfp_from_nvfp(nvfp);

    nvamc = &nvlfp->mmap_context;

    /* Only one pending mapping context per fd at a time. */
    if (nvamc->valid)
    {
        status = NV_ERR_STATE_IN_USE;
        goto done;
    }

    if (NV_IS_CTL_DEVICE(nv))
    {
        nvamc->alloc = pAllocPriv;
        nvamc->page_index = pageIndex;
    }
    else
    {
        /* The fd must belong to this very device. */
        if (NV_STATE_PTR(nvlfp->nvptr) != nv)
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }

        nvamc->mmap_start = nvuap->mmap_start;
        nvamc->mmap_size = nvuap->mmap_size;
        if (nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE)
        {
            nvamc->page_array = nvuap->page_array;
            nvamc->num_pages = nvuap->num_pages;
        }
        nvamc->access_start = nvuap->access_start;
        nvamc->access_size = nvuap->access_size;
        nvamc->remap_prot_extra = nvuap->remap_prot_extra;
    }

    nvamc->prot = prot;
    nvamc->valid = NV_TRUE;
    nvamc->caching = nvuap->caching;

done:
    /* Release the reference taken by nv_get_file_private(). */
    nv_put_file_private(priv);
    return status;
}
/*
 * Compute the physical address a user mapping should target for the
 * given page index and offset within an existing allocation.
 */
NV_STATUS NV_API_CALL nv_alloc_user_mapping(
    nv_state_t *nv,
    void *pAllocPrivate,
    NvU64 pageIndex,
    NvU32 pageOffset,
    NvU64 size,
    NvU32 protect,
    NvU64 *pUserAddress,
    void **ppPrivate
)
{
    nv_alloc_t *at = pAllocPrivate;
    NvU64 base;

    /*
     * Contiguous allocations derive every page from the first entry's
     * physical address; otherwise each page records its own.
     */
    if (at->flags.contig)
        base = at->page_table[0]->phys_addr + (pageIndex * PAGE_SIZE);
    else
        base = at->page_table[pageIndex]->phys_addr;

    *pUserAddress = base + pageOffset;

    return NV_OK;
}
/*
 * Counterpart of nv_alloc_user_mapping().  Intentionally a no-op: the
 * "mapping" handed out is just a physical address, so there is nothing
 * to release here.
 */
NV_STATUS NV_API_CALL nv_free_user_mapping(
    nv_state_t *nv,
    void *pAllocPrivate,
    NvU64 userAddress,
    void *pPrivate
)
{
    return NV_OK;
}
/*
 * This function adjusts the {mmap,access}_{start,size} fields to reflect
 * platform-specific mechanisms for isolating mappings at a finer
 * granularity than the os_page_size.
 */
NV_STATUS NV_API_CALL nv_get_usermap_access_params(
    nv_state_t *nv,
    nv_usermap_access_params_t *nvuap
)
{
    NvU64 addr = nvuap->addr;
    NvU64 size = nvuap->size;

    nvuap->remap_prot_extra = 0;

    /*
     * Do verification and cache encoding based on the original
     * (ostensibly smaller) mmap request, since accesses should be
     * restricted to that range.
     */
    if (rm_gpu_need_4k_page_isolation(nv) &&
        NV_4K_PAGE_ISOLATION_REQUIRED(addr, size))
    {
#if defined(NV_4K_PAGE_ISOLATION_PRESENT)
        nvuap->remap_prot_extra = NV_PROT_4K_PAGE_ISOLATION;
        nvuap->access_start = (NvU64)NV_4K_PAGE_ISOLATION_ACCESS_START(addr);
        nvuap->access_size = NV_4K_PAGE_ISOLATION_ACCESS_LEN(addr, size);
        nvuap->mmap_start = (NvU64)NV_4K_PAGE_ISOLATION_MMAP_ADDR(addr);
        nvuap->mmap_size = NV_4K_PAGE_ISOLATION_MMAP_LEN(size);
#else
        /* Isolation is required by the GPU but this build lacks it. */
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "4K page isolation required but not available!\n");
        return NV_ERR_OPERATING_SYSTEM;
#endif
    }

    return NV_OK;
}

View File

@@ -0,0 +1,736 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "os-interface.h"
#include "nv.h"
#include "nv-linux.h"
/*
 * Mark a physically contiguous range starting at page_ptr's page as
 * uncached, using whichever of set_memory_uc()/set_pages_uc() this
 * kernel provides.  No-op when neither API is present.
 */
static inline void nv_set_contig_memory_uc(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    unsigned long addr = (unsigned long)page_address(page);
    set_memory_uc(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    set_pages_uc(page, num_pages);
#endif
}
/*
 * Restore a physically contiguous range starting at page_ptr's page to
 * write-back caching, using whichever of set_memory_wb()/set_pages_wb()
 * this kernel provides.  No-op when neither API is present.
 */
static inline void nv_set_contig_memory_wb(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    unsigned long addr = (unsigned long)page_address(page);
    set_memory_wb(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    set_pages_wb(page, num_pages);
#endif
}
/*
 * Report whether this kernel provides the batched set_memory_array_*
 * helpers for the given caching type (only UNCACHED and WRITEBACK are
 * ever supported).
 */
static inline int nv_set_memory_array_type_present(NvU32 type)
{
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
    if ((type == NV_MEMORY_UNCACHED) || (type == NV_MEMORY_WRITEBACK))
        return 1;
#endif
    return 0;
}
/*
 * Report whether this kernel provides the batched set_pages_array_*
 * helpers for the given caching type (only UNCACHED and WRITEBACK are
 * ever supported).
 */
static inline int nv_set_pages_array_type_present(NvU32 type)
{
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
    if ((type == NV_MEMORY_UNCACHED) || (type == NV_MEMORY_WRITEBACK))
        return 1;
#endif
    return 0;
}
/*
 * Apply the requested caching type to an array of kernel virtual
 * addresses via the batched set_memory_array_{uc,wb}() helpers; any
 * unsupported type is reported as an error.
 */
static inline void nv_set_memory_array_type(
    unsigned long *pages,
    NvU32 num_pages,
    NvU32 type
)
{
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
    if (type == NV_MEMORY_UNCACHED)
    {
        set_memory_array_uc(pages, num_pages);
        return;
    }
    if (type == NV_MEMORY_WRITEBACK)
    {
        set_memory_array_wb(pages, num_pages);
        return;
    }
#endif
    nv_printf(NV_DBG_ERRORS,
              "NVRM: %s(): type %d unimplemented\n",
              __FUNCTION__, type);
}
/*
 * Apply the requested caching type to an array of struct page pointers
 * via the batched set_pages_array_{uc,wb}() helpers; any unsupported
 * type is reported as an error.
 */
static inline void nv_set_pages_array_type(
    struct page **pages,
    NvU32 num_pages,
    NvU32 type
)
{
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
    if (type == NV_MEMORY_UNCACHED)
    {
        set_pages_array_uc(pages, num_pages);
        return;
    }
    if (type == NV_MEMORY_WRITEBACK)
    {
        set_pages_array_wb(pages, num_pages);
        return;
    }
#endif
    nv_printf(NV_DBG_ERRORS,
              "NVRM: %s(): type %d unimplemented\n",
              __FUNCTION__, type);
}
/*
 * Dispatch a caching-type change for a contiguous range to the matching
 * helper; any type other than UNCACHED/WRITEBACK is reported as an
 * error.
 */
static inline void nv_set_contig_memory_type(
    nvidia_pte_t *page_ptr,
    NvU32 num_pages,
    NvU32 type
)
{
    if (type == NV_MEMORY_UNCACHED)
        nv_set_contig_memory_uc(page_ptr, num_pages);
    else if (type == NV_MEMORY_WRITEBACK)
        nv_set_contig_memory_wb(page_ptr, num_pages);
    else
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: %s(): type %d unimplemented\n",
                  __FUNCTION__, type);
}
/*
 * Change the caching attribute of every page in 'at' to 'type'.
 *
 * Prefers the kernel's batched set_{memory,pages}_array_* helpers, which
 * accept non-contiguous pages; if they are unavailable for this type, or
 * if allocating the batch array fails, falls back to setting each page
 * individually (measured ~10x slower, see comment below).
 */
static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type)
{
    NvU32 i;
    NV_STATUS status = NV_OK;
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
    unsigned long *pages = NULL;
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
    struct page **pages = NULL;
#else
    unsigned long *pages = NULL;
#endif
    nvidia_pte_t *page_ptr;
    struct page *page;

    /* Allocate the batch array sized for whichever API is present. */
    if (nv_set_memory_array_type_present(type))
    {
        status = os_alloc_mem((void **)&pages,
                at->num_pages * sizeof(unsigned long));
    }
    else if (nv_set_pages_array_type_present(type))
    {
        status = os_alloc_mem((void **)&pages,
                at->num_pages * sizeof(struct page*));
    }

    /* On allocation failure, fall through to the per-page path. */
    if (status != NV_OK)
        pages = NULL;

    //
    // If the set_{memory,page}_array_* functions are in the kernel interface,
    // it's faster to use them since they work on non-contiguous memory,
    // whereas the set_{memory,page}_* functions do not.
    //
    if (pages)
    {
        for (i = 0; i < at->num_pages; i++)
        {
            page_ptr = at->page_table[i];
            page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
            pages[i] = (unsigned long)page_address(page);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
            pages[i] = page;
#endif
        }
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
        nv_set_memory_array_type(pages, at->num_pages, type);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
        nv_set_pages_array_type(pages, at->num_pages, type);
#endif
        os_free_mem(pages);
    }
    //
    // If the set_{memory,page}_array_* functions aren't present in the kernel
    // interface, each page has to be set individually, which has been measured
    // to be ~10x slower than using the set_{memory,page}_array_* functions.
    //
    else
    {
        for (i = 0; i < at->num_pages; i++)
            nv_set_contig_memory_type(at->page_table[i], 1, type);
    }
}
/*
 * Return the highest physical byte address covered by any online NUMA
 * node, i.e. the top of addressable system memory.
 */
static NvU64 nv_get_max_sysmem_address(void)
{
    NvU64 highest_pfn = 0ULL;
    int nid;

    for_each_online_node(nid)
    {
        NvU64 node_last_pfn = (NvU64)node_end_pfn(nid);
        if (node_last_pfn > highest_pfn)
            highest_pfn = node_last_pfn;
    }

    return ((highest_pfn + 1) << PAGE_SHIFT) - 1;
}
/*
 * Build the gfp mask for a system-memory allocation: optionally restrict
 * to the 32-bit DMA zone, avoid aggressive retry/OOM behavior, and apply
 * per-allocation flags (zeroing, node-0 pinning, compound pages).
 *
 * NOTE(review): the block comment below describes the SWIOTLB/dma_direct
 * motivation, but the condition tests !nv_requires_dma_remap(nv) —
 * confirm the intended polarity against the callers before changing.
 */
static unsigned int nv_compute_gfp_mask(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    unsigned int gfp_mask = NV_GFP_KERNEL;
    struct device *dev = at->dev;

    /*
     * If we know that SWIOTLB is enabled (and therefore we avoid calling the
     * kernel to DMA-remap the pages), or if we are using dma_direct (which may
     * transparently use the SWIOTLB for pages that are unaddressable by the
     * device, in kernel versions 5.0 and later), limit our allocation pool
     * to the first 4GB to avoid allocating pages outside of our device's
     * addressable limit.
     * Also, limit the allocation to the first 4GB if explicitly requested by
     * setting the "nv->force_dma32_alloc" variable.
     */
    if (!nv || !nv_requires_dma_remap(nv) || nv_is_dma_direct(dev) || nv->force_dma32_alloc)
    {
        NvU64 max_sysmem_address = nv_get_max_sysmem_address();
        if ((dev && dev->dma_mask && (*(dev->dma_mask) < max_sysmem_address)) ||
            (nv && nv->force_dma32_alloc))
        {
            gfp_mask = NV_GFP_DMA32;
        }
    }

#if defined(__GFP_RETRY_MAYFAIL)
    gfp_mask |= __GFP_RETRY_MAYFAIL;
#elif defined(__GFP_NORETRY)
    gfp_mask |= __GFP_NORETRY;
#endif
#if defined(__GFP_ZERO)
    if (at->flags.zeroed)
        gfp_mask |= __GFP_ZERO;
#endif
#if defined(__GFP_THISNODE)
    if (at->flags.node0)
        gfp_mask |= __GFP_THISNODE;
#endif

    // Compound pages are required by vm_insert_page for high-order page
    // allocations
    if (at->order > 0)
        gfp_mask |= __GFP_COMP;

    return gfp_mask;
}
/*
* This function is needed for allocating contiguous physical memory in xen
* dom0. Because of the use of xen sw iotlb in xen dom0, memory allocated by
* NV_GET_FREE_PAGES may not be machine contiguous when size is more than
* 1 page. nv_alloc_coherent_pages() will give us machine contiguous memory.
* Even though we get dma_address directly in this function, we will
* still call pci_map_page() later to get dma address. This is fine as it
* will return the same machine address.
*/
/*
 * Back at->page_table[] with a single dma_alloc_coherent() allocation of
 * at->num_pages pages (machine-contiguous).  Marks the allocation
 * coherent so the matching nv_free_coherent_pages() path is used to
 * release it, and makes the range uncached when a non-cached caching
 * type was requested.
 *
 * @return NV_OK on success,
 *         NV_ERR_NOT_SUPPORTED on the control device (no struct device),
 *         NV_ERR_NO_MEMORY when the allocation fails.
 */
static NV_STATUS nv_alloc_coherent_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    NvU32 i;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    dma_addr_t bus_addr;
    nv_linux_state_t *nvl;
    struct device *dev;

    if (!nv)
    {
        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: coherent page alloc on nvidiactl not supported\n", __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    dev = nvl->dev;

    gfp_mask = nv_compute_gfp_mask(nv, at);

    virt_addr = (unsigned long)dma_alloc_coherent(dev,
                                                  at->num_pages * PAGE_SIZE,
                                                  &bus_addr,
                                                  gfp_mask);
    if (!virt_addr)
    {
        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
        return NV_ERR_NO_MEMORY;
    }

    /* Derive each page's kernel VA, physical address, and DMA address
     * from the single contiguous allocation. */
    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        page_ptr->virt_addr = virt_addr + i * PAGE_SIZE;
        page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr);
        page_ptr->dma_addr = bus_addr + i * PAGE_SIZE;
    }

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
    }

    /* Remember how this was allocated so the free path matches. */
    at->flags.coherent = NV_TRUE;

    return NV_OK;
}
/*
 * Release an allocation made by nv_alloc_coherent_pages(): restore
 * write-back caching if it had been changed, then hand the whole range
 * back with a single dma_free_coherent() call.
 */
static void nv_free_coherent_pages(
    nv_alloc_t *at
)
{
    struct device *dev = at->dev;
    nvidia_pte_t *first = at->page_table[0];

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(first,
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
    }

    dma_free_coherent(dev, at->num_pages * PAGE_SIZE,
                      (void *)first->virt_addr, first->dma_addr);
}
/*
 * Allocate a physically contiguous range of at->num_pages pages and
 * fill in at->page_table[].
 *
 * Falls back to dma_alloc_coherent() (nv_alloc_coherent_pages) for Xen
 * dom0, unencrypted allocations, platforms without an ISO IOMMU, or —
 * on vGPU hypervisors — when the page allocator cannot satisfy the
 * order.  On partial failure, every page set up so far is unreserved
 * and the whole high-order allocation is freed.
 *
 * @return NV_OK, NV_ERR_NO_MEMORY, or NV_ERR_OPERATING_SYSTEM (physical
 *         address lookup failure).
 */
NV_STATUS nv_alloc_contig_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    NV_STATUS status;
    nvidia_pte_t *page_ptr;
    NvU32 i, j;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    NvU64 phys_addr;
    struct device *dev = at->dev;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    // TODO: This is a temporary WAR, and will be removed after fixing bug 200732409.
    if (os_is_xen_dom0() || at->flags.unencrypted)
        return nv_alloc_coherent_pages(nv, at);

    if (!NV_SOC_IS_ISO_IOMMU_PRESENT(nv))
    {
        return nv_alloc_coherent_pages(nv, at);
    }

    at->order = get_order(at->num_pages * PAGE_SIZE);
    gfp_mask = nv_compute_gfp_mask(nv, at);

    if (at->flags.node0)
    {
        NV_ALLOC_PAGES_NODE(virt_addr, 0, at->order, gfp_mask);
    }
    else
    {
        NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
    }
    if (virt_addr == 0)
    {
        /* On vGPU hypervisors, retry via the coherent allocator. */
        if (os_is_vgx_hyper())
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: failed to allocate memory, trying coherent memory \n", __FUNCTION__);

            status = nv_alloc_coherent_pages(nv, at);
            return status;
        }

        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
        return NV_ERR_NO_MEMORY;
    }
#if !defined(__GFP_ZERO)
    /* No __GFP_ZERO on this kernel: zero by hand when requested. */
    if (at->flags.zeroed)
        memset((void *)virt_addr, 0, (at->num_pages * PAGE_SIZE));
#endif

    /* Record per-page addresses and reserve each page. */
    for (i = 0; i < at->num_pages; i++, virt_addr += PAGE_SIZE)
    {
        phys_addr = nv_get_kern_phys_address(virt_addr);
        if (phys_addr == 0)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: %s: failed to look up physical address\n",
                __FUNCTION__);
            status = NV_ERR_OPERATING_SYSTEM;
            goto failed;
        }

        page_ptr = at->page_table[i];
        page_ptr->phys_addr = phys_addr;
        page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
        page_ptr->virt_addr = virt_addr;
        page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);

        NV_MAYBE_RESERVE_PAGE(page_ptr);
    }

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
    }

    at->flags.coherent = NV_FALSE;

    return NV_OK;

failed:
    /* Unreserve everything set up so far, then free the whole order. */
    if (i > 0)
    {
        for (j = 0; j < i; j++)
            NV_MAYBE_UNRESERVE_PAGE(at->page_table[j]);
    }

    page_ptr = at->page_table[0];
    NV_FREE_PAGES(page_ptr->virt_addr, at->order);

    return status;
}
/*
 * Release a contiguous allocation made by nv_alloc_contig_pages().
 * Delegates to nv_free_coherent_pages() when the allocation actually
 * came from the coherent fallback path.  Warns (rate-limited) about any
 * page whose refcount changed while we held it, unreserves every page,
 * and frees the high-order block.
 */
void nv_free_contig_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    unsigned int i;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    if (at->flags.coherent)
        return nv_free_coherent_pages(at);

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
    }

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        /* Flag unexpected refcount changes — possible leaked mapping. */
        if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
        {
            static int count = 0;
            if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: %s: page count != initial page count (%u,%u)\n",
                    __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
                    page_ptr->page_count);
            }
        }
        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
    }

    page_ptr = at->page_table[0];

    NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}
/*
 * Allocate at->num_pages individual (discontiguous) system pages,
 * filling in at->page_table[] with each page's kernel VA, physical
 * address, and DMA address.
 *
 * Unencrypted allocations with a device come from dma_alloc_coherent()
 * (at->flags.coherent is set so the free path matches); otherwise pages
 * come from the page allocator, optionally pinned to node 0.  On any
 * failure, every page allocated so far is unreserved and freed.
 *
 * @return NV_OK, NV_ERR_NO_MEMORY, or NV_ERR_OPERATING_SYSTEM (physical
 *         address lookup failure).
 */
NV_STATUS nv_alloc_system_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    NV_STATUS status;
    nvidia_pte_t *page_ptr;
    NvU32 i, j;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    NvU64 phys_addr;
    struct device *dev = at->dev;
    dma_addr_t bus_addr;

    //
    // Bug fix: __FUNCTION__ is a string and must be printed with "%s",
    // not "%u" (printing a pointer through "%u" is undefined behavior;
    // every other trace in this file uses "%s").
    //
    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    gfp_mask = nv_compute_gfp_mask(nv, at);

    for (i = 0; i < at->num_pages; i++)
    {
        if (at->flags.unencrypted && (dev != NULL))
        {
            virt_addr = (unsigned long)dma_alloc_coherent(dev,
                                                          PAGE_SIZE,
                                                          &bus_addr,
                                                          gfp_mask);
            at->flags.coherent = NV_TRUE;
        }
        else if (at->flags.node0)
        {
            NV_ALLOC_PAGES_NODE(virt_addr, 0, 0, gfp_mask);
        }
        else
        {
            NV_GET_FREE_PAGES(virt_addr, 0, gfp_mask);
        }

        if (virt_addr == 0)
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
            status = NV_ERR_NO_MEMORY;
            goto failed;
        }
#if !defined(__GFP_ZERO)
        /* No __GFP_ZERO on this kernel: zero by hand when requested. */
        if (at->flags.zeroed)
            memset((void *)virt_addr, 0, PAGE_SIZE);
#endif

        phys_addr = nv_get_kern_phys_address(virt_addr);
        if (phys_addr == 0)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: %s: failed to look up physical address\n",
                __FUNCTION__);
            NV_FREE_PAGES(virt_addr, 0);
            status = NV_ERR_OPERATING_SYSTEM;
            goto failed;
        }

#if defined(_PAGE_NX)
        /* Skip low pages when NX is in effect; retry this index. */
        if (((_PAGE_NX & pgprot_val(PAGE_KERNEL)) != 0) &&
            (phys_addr < 0x400000))
        {
            nv_printf(NV_DBG_SETUP,
                "NVRM: VM: %s: discarding page @ 0x%llx\n",
                __FUNCTION__, phys_addr);
            --i;
            continue;
        }
#endif

        page_ptr = at->page_table[i];
        page_ptr->phys_addr = phys_addr;
        page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
        page_ptr->virt_addr = virt_addr;

        //
        // Use unencrypted dma_addr returned by dma_alloc_coherent() as
        // nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled.
        //
        if (at->flags.coherent)
            page_ptr->dma_addr = bus_addr;
        else if (dev)
            page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
        else
            page_ptr->dma_addr = page_ptr->phys_addr;

        NV_MAYBE_RESERVE_PAGE(page_ptr);
    }

    if (at->cache_type != NV_MEMORY_CACHED)
        nv_set_memory_type(at, NV_MEMORY_UNCACHED);

    return NV_OK;

failed:
    /* Unreserve and free every page set up before the failure. */
    if (i > 0)
    {
        for (j = 0; j < i; j++)
        {
            page_ptr = at->page_table[j];
            NV_MAYBE_UNRESERVE_PAGE(page_ptr);
            if (at->flags.coherent)
            {
                dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
                                  page_ptr->dma_addr);
            }
            else
            {
                NV_FREE_PAGES(page_ptr->virt_addr, 0);
            }
        }
    }

    return status;
}
/*
 * Release pages allocated by nv_alloc_system_pages(): restore write-back
 * caching if it had been changed, warn (rate-limited) about any page
 * whose refcount changed while we held it, then free each page through
 * the same API it was allocated from (coherent vs. page allocator).
 */
void nv_free_system_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    unsigned int i;
    struct device *dev = at->dev;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    if (at->cache_type != NV_MEMORY_CACHED)
        nv_set_memory_type(at, NV_MEMORY_WRITEBACK);

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        /* Flag unexpected refcount changes — possible leaked mapping. */
        if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
        {
            static int count = 0;
            if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: %s: page count != initial page count (%u,%u)\n",
                    __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
                    page_ptr->page_count);
            }
        }

        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
        if (at->flags.coherent)
        {
            dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
                              page_ptr->dma_addr);
        }
        else
        {
            NV_FREE_PAGES(page_ptr->virt_addr, 0);
        }
    }
}
/*
 * Map an array of pages into a contiguous kernel virtual range via
 * nv_vmap().  Returns 0 (and traps to the debugger) when called from a
 * context that cannot sleep, since vmap may block.
 */
NvUPtr nv_vm_map_pages(
    struct page **pages,
    NvU32 count,
    NvBool cached,
    NvBool unencrypted
)
{
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: %s: can't map %d pages, invalid context!\n",
                  __FUNCTION__, count);
        os_dbg_breakpoint();
        return 0;
    }

    return nv_vmap(pages, count, cached, unencrypted);
}
/*
 * Tear down a mapping created by nv_vm_map_pages().  Refuses (and traps
 * to the debugger) when called from a context that cannot sleep.
 */
void nv_vm_unmap_pages(
    NvUPtr virt_addr,
    NvU32 count
)
{
    if (NV_MAY_SLEEP())
    {
        nv_vunmap(virt_addr, count);
    }
    else
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: %s: can't unmap %d pages at 0x%0llx, "
                  "invalid context!\n", __FUNCTION__, count, virt_addr);
        os_dbg_breakpoint();
    }
}
/*
 * Initialize a struct address_space.  Uses the kernel's
 * address_space_init_once() when available; otherwise replicates the
 * pre-2.6.x-era manual initialization for old kernels.
 */
void nv_address_space_init_once(struct address_space *mapping)
{
#if defined(NV_ADDRESS_SPACE_INIT_ONCE_PRESENT)
    address_space_init_once(mapping);
#else
    memset(mapping, 0, sizeof(*mapping));
    INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);

#if defined(NV_ADDRESS_SPACE_HAS_RWLOCK_TREE_LOCK)
    //
    // The .tree_lock member variable was changed from type rwlock_t, to
    // spinlock_t, on 25 July 2008, by mainline commit
    // 19fd6231279be3c3bdd02ed99f9b0eb195978064.
    //
    rwlock_init(&mapping->tree_lock);
#else
    spin_lock_init(&mapping->tree_lock);
#endif

    spin_lock_init(&mapping->i_mmap_lock);
    INIT_LIST_HEAD(&mapping->private_list);
    spin_lock_init(&mapping->private_lock);
    INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
    INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
#endif /* !NV_ADDRESS_SPACE_INIT_ONCE_PRESENT */
}

View File

@@ -0,0 +1,39 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*
 * Translate a direct-mapped kernel virtual address to its physical
 * address.  Returns 0 (with an error trace) for addresses outside the
 * kernel direct map.
 */
NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address)
{
    if (!virt_addr_valid(address))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: can't translate address in %s()!\n", __FUNCTION__);
        return 0;
    }

    /* direct-mapped kernel address */
    return __pa(address);
}

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,302 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* nv_gpu_ops.h
*
* This file defines the interface between the common RM layer
* and the OS specific platform layers. (Currently supported
* are Linux and KMD)
*
*/
#ifndef _NV_GPU_OPS_H_
#define _NV_GPU_OPS_H_
#include "nvgputypes.h"
#include "nv_uvm_types.h"
// Opaque handles to RM-managed objects; the structs are defined in the RM
// implementation, callers only ever hold pointers.
typedef struct gpuSession *gpuSessionHandle;
typedef struct gpuDevice *gpuDeviceHandle;
typedef struct gpuAddressSpace *gpuAddressSpaceHandle;
typedef struct gpuChannel *gpuChannelHandle;
typedef struct gpuObject *gpuObjectHandle;
typedef struct gpuRetainedChannel_struct gpuRetainedChannel;
// Session and device lifetime management.
NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session);
NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session);
NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session,
const gpuInfo *pGpuInfo,
const NvProcessorUuid *gpuGuid,
struct gpuDevice **device,
NvBool bCreateSmcPartition);
NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device);
// GPU virtual address space creation/teardown and peer-to-peer capability query.
NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device,
NvU64 vaBase,
NvU64 vaSize,
gpuAddressSpaceHandle *vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1,
gpuDeviceHandle device2,
getP2PCapsParams *p2pCaps);
void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace);
// Memory allocation within a VA space (FB = video memory, Sys = system memory).
NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace,
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace,
NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo);
// PMA (physical memory allocator) page-level operations on an opaque pPma object.
NV_STATUS nvGpuOpsPmaAllocPages(void *pPma,
NvLength pageCount,
NvU32 pageSize,
gpuPmaAllocationOptions *pPmaAllocOptions,
NvU64 *pPages);
void nvGpuOpsPmaFreePages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU32 pageSize,
NvU32 flags);
NV_STATUS nvGpuOpsPmaPinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU32 pageSize,
NvU32 flags);
NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU32 pageSize);
// Channel (GPU work submission context) management.
NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace,
const gpuChannelAllocParams *params,
gpuChannelHandle *channelHandle,
gpuChannelInfo *channelInfo);
NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace,
NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset);
void nvGpuOpsChannelDestroy(struct gpuChannel *channel);
void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace,
NvU64 pointer);
// CPU mappings of allocations owned by a VA space.
NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace,
NvU64 memory, NvLength length,
void **cpuPtr, NvU32 pageSize);
void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace,
void* cpuPtr);
// Capability queries (general caps and copy-engine caps).
NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device,
gpuCaps *caps);
NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device,
gpuCesCaps *caps);
// Duplicate allocations/handles across VA spaces and clients.
NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuAddressSpace *dstVaSpace,
NvU64 dstVaAlignment,
NvU64 *dstAddress);
NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device,
NvHandle hClient,
NvHandle hPhysMemory,
NvHandle *hDupMemory,
gpuMemoryInfo *pGpuMemoryInfo);
// GPU and client identification helpers.
NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice,
NvHandle hSubDevice, NvU8 *gpuGuid,
unsigned guidLength);
NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid,
const NvU8 *gpuUuid,
NvHandle *hClient,
NvHandle *hDevice,
NvHandle *hSubDevice);
NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device,
NvHandle hPhysHandle);
NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus);
NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid,
const gpuClientInfo *pGpuClientInfo,
gpuInfo *pGpuInfo);
NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId,
NvU32 *pSubdeviceId);
// Interrupt ownership, interrupt servicing, and ECC error checks.
NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts);
NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device);
NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet);
// GPU page directory / GMMU control and TLB invalidation.
NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid);
NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace);
NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt);
NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace);
NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo);
NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo);
// Fault buffer management and non-replayable fault retrieval.
NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo);
NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device,
gpuFaultInfo *pFaultInfo);
NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults);
NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults);
NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device,
NvHandle hUserClient,
NvHandle hUserVASpace,
struct gpuAddressSpace **vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device,
void **pPma,
const UvmPmaStatistics **pPmaPubStats);
// Access counter buffer setup, ownership, and enable/disable.
NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);
NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device,
gpuAccessCntrInfo *pAccessCntrInfo);
NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session,
gpuAccessCntrInfo *pAccessCntrInfo,
NvBool bOwnInterrupts);
NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device,
gpuAccessCntrInfo *pAccessCntrInfo,
gpuAccessCntrConfig *pAccessCntrConfig);
NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo);
// Peer-to-peer object lifetime between two devices.
NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1,
struct gpuDevice *device2,
NvHandle *hP2pObject);
NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session,
NvHandle hP2pObject);
NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace,
NvHandle hDupedMemory,
NvU64 offset,
NvU64 size,
gpuExternalMappingInfo *pGpuExternalMappingInfo);
// Channel retain/release and resource binding/inspection.
NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace,
NvHandle hClient,
NvHandle hChannel,
gpuRetainedChannel **retainedChannel,
gpuChannelInstanceInfo *channelInstanceInfo);
void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel);
NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel,
gpuChannelResourceBindParams *channelResourceBindParams);
void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate);
NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace,
NvP64 resourceDescriptor,
NvU64 offset,
NvU64 size,
gpuExternalMappingInfo *pGpuExternalMappingInfo);
NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device,
const void *pFaultPacket);
// Private interface used for windows only
#if defined(NV_WINDOWS)
NV_STATUS nvGpuOpsGetRmHandleForSession(gpuSessionHandle hSession, NvHandle *hRmClient);
NV_STATUS nvGpuOpsGetRmHandleForChannel(gpuChannelHandle hChannel, NvHandle *hRmChannel);
#endif // NV_WINDOWS
// Interface used for SR-IOV heavy
NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device,
const gpuPagingChannelAllocParams *params,
gpuPagingChannelHandle *channelHandle,
gpuPagingChannelInfo *channelinfo);
void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel);
NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuDevice *device,
NvU64 *dstAddress);
void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace,
NvU64 srcAddress,
struct gpuDevice *device);
NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel,
char *methodStream,
NvU32 methodStreamSize);
// Interface used for CCSL
NV_STATUS nvGpuOpsCcslContextInit(UvmCslContext **ctx,
gpuChannelHandle channel);
NV_STATUS nvGpuOpsCcslContextClear(UvmCslContext *ctx);
NV_STATUS nvGpuOpsCcslLogDeviceEncryption(UvmCslContext *ctx);
NV_STATUS nvGpuOpsCcslEncrypt(UvmCslContext *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *outputBuffer,
NvU8 *authTagBuffer);
NV_STATUS nvGpuOpsCcslDecrypt(UvmCslContext *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *outputBuffer,
NvU8 const *authTagBuffer);
NV_STATUS nvGpuOpsCcslSign(UvmCslContext *ctx,
NvU32 bufferSize,
NvU8 const *inputBuffer,
NvU8 *authTagBuffer);
#endif /* _NV_GPU_OPS_H_*/

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,44 @@
###########################################################################
# Kbuild fragment listing the kernel interface layer sources for nvidia.ko.
# NVIDIA_SOURCES is consumed by nvidia.Kbuild to derive NVIDIA_OBJECTS.
###########################################################################
NVIDIA_SOURCES ?=
NVIDIA_SOURCES_CXX ?=
NVIDIA_SOURCES += nvidia/nv-platform.c
NVIDIA_SOURCES += nvidia/nv-dsi-parse-panel-props.c
NVIDIA_SOURCES += nvidia/nv-clk.c
NVIDIA_SOURCES += nvidia/nv-gpio.c
NVIDIA_SOURCES += nvidia/nv-nano-timer.c
NVIDIA_SOURCES += nvidia/nv-backlight.c
NVIDIA_SOURCES += nvidia/nv-imp.c
NVIDIA_SOURCES += nvidia/nv-host1x.c
NVIDIA_SOURCES += nvidia/nv-platform-pm.c
NVIDIA_SOURCES += nvidia/nv-ipc-soc.c
NVIDIA_SOURCES += nvidia/nv.c
NVIDIA_SOURCES += nvidia/nv-pci.c
NVIDIA_SOURCES += nvidia/nv-dmabuf.c
NVIDIA_SOURCES += nvidia/nv-acpi.c
NVIDIA_SOURCES += nvidia/nv-cray.c
NVIDIA_SOURCES += nvidia/nv-dma.c
NVIDIA_SOURCES += nvidia/nv-i2c.c
NVIDIA_SOURCES += nvidia/nv-mmap.c
NVIDIA_SOURCES += nvidia/nv-p2p.c
NVIDIA_SOURCES += nvidia/nv-pat.c
NVIDIA_SOURCES += nvidia/nv-procfs.c
NVIDIA_SOURCES += nvidia/nv-procfs-utils.c
NVIDIA_SOURCES += nvidia/nv-usermap.c
NVIDIA_SOURCES += nvidia/nv-vm.c
NVIDIA_SOURCES += nvidia/nv-vtophys.c
NVIDIA_SOURCES += nvidia/os-interface.c
NVIDIA_SOURCES += nvidia/os-mlock.c
NVIDIA_SOURCES += nvidia/os-pci.c
NVIDIA_SOURCES += nvidia/os-registry.c
NVIDIA_SOURCES += nvidia/os-usermap.c
NVIDIA_SOURCES += nvidia/nv-modeset-interface.c
NVIDIA_SOURCES += nvidia/nv-pci-table.c
NVIDIA_SOURCES += nvidia/nv-kthread-q.c
NVIDIA_SOURCES += nvidia/nv-memdbg.c
NVIDIA_SOURCES += nvidia/nv-ibmnpu.c
NVIDIA_SOURCES += nvidia/nv-report-err.c
NVIDIA_SOURCES += nvidia/nv-rsync.c
NVIDIA_SOURCES += nvidia/nv-msi.c
NVIDIA_SOURCES += nvidia/nv-caps.c
NVIDIA_SOURCES += nvidia/nv-frontend.c
NVIDIA_SOURCES += nvidia/nv_uvm_interface.c

View File

@@ -0,0 +1,261 @@
###########################################################################
# Kbuild fragment for nvidia.ko
###########################################################################
#
# Define NVIDIA_{SOURCES,OBJECTS}
#
include $(src)/nvidia/nvidia-sources.Kbuild
NVIDIA_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_SOURCES))
obj-m += nvidia.o
nvidia-y := $(NVIDIA_OBJECTS)
NVIDIA_KO = nvidia/nvidia.ko
#
# nv-kernel.o_binary is the core binary component of nvidia.ko, shared
# across all UNIX platforms. Create a symlink, "nv-kernel.o" that
# points to nv-kernel.o_binary, and add nv-kernel.o to the list of
# objects to link into nvidia.ko.
#
# Note that:
# - The kbuild "clean" rule will delete all objects in nvidia-y (which
# is why we use a symlink instead of just adding nv-kernel.o_binary
# to nvidia-y).
# - kbuild normally uses the naming convention of ".o_shipped" for
# binary files. That is not used here, because the kbuild rule to
# create the "normal" object file from ".o_shipped" does a copy, not
# a symlink. This file is quite large, so a symlink is preferred.
# - The file added to nvidia-y should be relative to gmake's cwd.
# But, the target for the symlink rule should be prepended with $(obj).
# - The "symlink" command is called using kbuild's if_changed macro to
# generate an .nv-kernel.o.cmd file which can be used on subsequent
# runs to determine if the command line to create the symlink changed
# and needs to be re-executed.
#
NVIDIA_BINARY_OBJECT := $(src)/nvidia/nv-kernel.o_binary
NVIDIA_BINARY_OBJECT_O := nvidia/nv-kernel.o
quiet_cmd_symlink = SYMLINK $@
cmd_symlink = ln -sf $< $@
targets += $(NVIDIA_BINARY_OBJECT_O)
$(obj)/$(NVIDIA_BINARY_OBJECT_O): $(NVIDIA_BINARY_OBJECT) FORCE
	$(call if_changed,symlink)
nvidia-y += $(NVIDIA_BINARY_OBJECT_O)
#
# Define nvidia.ko-specific CFLAGS.
#
NVIDIA_CFLAGS += -I$(src)/nvidia
NVIDIA_CFLAGS += -DNVIDIA_UNDEF_LEGACY_BIT_MACROS
# Release builds strip debug macros; develop/debug builds enable the
# in-driver memory logger (NV_MEM_LOGGER).
ifeq ($(NV_BUILD_TYPE),release)
NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG
endif
ifeq ($(NV_BUILD_TYPE),develop)
NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_MEM_LOGGER
endif
ifeq ($(NV_BUILD_TYPE),debug)
NVIDIA_CFLAGS += -DDEBUG -D_DEBUG -UNDEBUG -DNV_MEM_LOGGER
endif
$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_OBJECTS), $(NVIDIA_CFLAGS))
#
# nv-procfs.c requires nv-compiler.h
#
NV_COMPILER_VERSION_HEADER = $(obj)/nv_compiler.h
$(NV_COMPILER_VERSION_HEADER):
	@echo \#define NV_COMPILER \"`$(CC) -v 2>&1 | tail -n 1`\" > $@
$(obj)/nvidia/nv-procfs.o: $(NV_COMPILER_VERSION_HEADER)
clean-files += $(NV_COMPILER_VERSION_HEADER)
#
# Build nv-interface.o from the kernel interface layer objects, suitable
# for further processing by the top-level makefile to produce a precompiled
# kernel interface file.
#
NVIDIA_INTERFACE := nvidia/nv-interface.o
# Linux kernel v5.12 and later looks at "always-y"; kernel versions
# before v5.6 look at "always"; kernel versions between v5.6 and v5.12
# look at both.
always += $(NVIDIA_INTERFACE)
always-y += $(NVIDIA_INTERFACE)
$(obj)/$(NVIDIA_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_OBJECTS))
	$(LD) -r -o $@ $^
#
# Register the conftests needed by nvidia.ko
#
NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_OBJECTS)
# Function-presence compile tests (does the target kernel export this API?).
NV_CONFTEST_FUNCTION_COMPILE_TESTS += hash__remap_4k_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_array_uc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_alloc_table
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_get_domain_bus_and_slot
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_num_physpages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += efi_enabled
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
NV_CONFTEST_FUNCTION_COMPILE_TESTS += proc_remove
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pm_vt_switch_required
NV_CONFTEST_FUNCTION_COMPILE_TESTS += xen_ioemu_inject_msi
NV_CONFTEST_FUNCTION_COMPILE_TESTS += phys_to_dma
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_dma_ops
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_attr_macros
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_page_attrs
NV_CONFTEST_FUNCTION_COMPILE_TESTS += write_cr4
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_node_by_phandle
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_node_to_nid
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_pci_get_npu_dev
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_ibm_chip_id
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_bus_address
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_stop_and_remove_bus_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_remove_bus_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += register_cpu_notifier
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpuhp_setup_state
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_resource
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_backlight_device_by_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_msix_range
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_write
NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_create_on_node
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_matching_node
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dev_is_pci
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_direct_map_resource
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_get_platform
NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_bpmp_send_receive
NV_CONFTEST_FUNCTION_COMPILE_TESTS += flush_cache_all
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += jiffies_to_timespec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += full_name_hash
NV_CONFTEST_FUNCTION_COMPILE_TESTS += hlist_for_each_entry
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pgprot_decrypted
NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_mkdec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += iterate_fd
NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_page_iter_page
NV_CONFTEST_FUNCTION_COMPILE_TESTS += unsafe_follow_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_close_on_exec
NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += device_property_read_u64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_platform_populate
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_dma_configure
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_count_elems_of_size
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u8_array
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_new_client_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_unregister_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_named_gpio
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_gpio_request_one
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_input
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_output
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_get_value
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_set_value
NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_to_irq
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_set_bw
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_export_args
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_has_dynamic_attachment
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_set_mask_and_coherent
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_bus_get_device
NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_clk_bulk_get_all
# Symbol-export compile tests (is the symbol exported, and GPL-only or not?).
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_map_sg_attrs
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_dma_ops
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___close_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_close_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd_flags
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_get_default_device
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_byte_offset
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_aperture
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_register_ipc_client
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_unregister_ipc_client
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_client_ipc_send_recv
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_dram_clk_to_mc_clk
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_dram_num_channels
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dram_types
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node
# Type/struct-layout compile tests (does a type or field exist / have a given shape?).
NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations
NV_CONFTEST_TYPE_COMPILE_TESTS += kuid_t
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += noncoherent_swiotlb_dma_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot
NV_CONFTEST_TYPE_COMPILE_TESTS += address_space_init_once
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += vmbus_channel_has_ringbuffer_page
NV_CONFTEST_TYPE_COMPILE_TESTS += device_driver_of_match_table
NV_CONFTEST_TYPE_COMPILE_TESTS += device_of_node
NV_CONFTEST_TYPE_COMPILE_TESTS += node_states_n_memory
NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work
NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
NV_CONFTEST_TYPE_COMPILE_TESTS += vmalloc_has_pgprot_t_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_channel_state
NV_CONFTEST_TYPE_COMPILE_TESTS += pci_dev_has_ats_enabled
NV_CONFTEST_TYPE_COMPILE_TESTS += mt_device_gre
NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg
# Generic feature/configuration compile tests.
NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_csp_build
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages
NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages
NV_CONFTEST_GENERIC_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_GENERIC_COMPILE_TESTS += pm_runtime_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available

View File

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,287 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*
 * Resolve the PFN backing a user virtual address within 'vma'.
 * Dispatches to unsafe_follow_pfn() when the conftest detected it
 * (NV_UNSAFE_FOLLOW_PFN_PRESENT), otherwise to follow_pfn().
 * Returns 0 on success, negative value on failure.
 */
static inline int nv_follow_pfn(struct vm_area_struct *vma,
                                unsigned long address,
                                unsigned long *pfn)
{
#if defined(NV_UNSAFE_FOLLOW_PFN_PRESENT)
    return unsafe_follow_pfn(vma, address, pfn);
#else
    return follow_pfn(vma, address, pfn);
#endif
}
/*!
* @brief Locates the PFNs for a user IO address range, and converts those to
* their associated PTEs.
*
* @param[in] vma VMA that contains the virtual address range given by the
* start and page count parameters.
* @param[in] start Beginning of the virtual address range of the IO PTEs.
* @param[in] page_count Number of pages containing the IO range being
* mapped.
* @param[in,out] pte_array Storage array for PTE addresses. Must be large
* enough to contain at least page_count pointers.
*
* @return NV_OK if the PTEs were identified successfully, error otherwise.
*/
/*!
 * @brief Locates the PFNs for a user IO address range, and converts those to
 * their associated PTEs.
 *
 * @param[in] vma VMA that contains the virtual address range given by the
 * start and page count parameters.
 * @param[in] start Beginning of the virtual address range of the IO PTEs.
 * @param[in] page_count Number of pages containing the IO range being
 * mapped.
 * @param[in,out] pte_array Storage array for PTE addresses. Must be large
 * enough to contain at least page_count pointers.
 *
 * @return NV_OK if the PTEs were identified successfully, error otherwise.
 */
static NV_STATUS get_io_ptes(struct vm_area_struct *vma,
                             NvUPtr start,
                             NvU64 page_count,
                             NvU64 **pte_array)
{
    NvU64 idx;
    NvU64 prev_addr = 0;
    unsigned long pfn;

    for (idx = 0; idx < page_count; idx++)
    {
        if (nv_follow_pfn(vma, start + (idx * PAGE_SIZE), &pfn) < 0)
        {
            return NV_ERR_INVALID_ADDRESS;
        }

        pte_array[idx] = (NvU64 *)(pfn << PAGE_SHIFT);

        //
        // This interface is to be used for contiguous, uncacheable I/O regions.
        // Internally, osCreateOsDescriptorFromIoMemory() checks the user-provided
        // flags against this, and creates a single memory descriptor with the same
        // attributes. This check ensures the actual mapping supplied matches the
        // user's declaration. Ensure the PFNs represent a contiguous range,
        // error if they do not.
        //
        if ((idx != 0) && ((NvU64)pte_array[idx] != (prev_addr + PAGE_SIZE)))
        {
            return NV_ERR_INVALID_ADDRESS;
        }

        prev_addr = (NvU64)pte_array[idx];
    }

    return NV_OK;
}
/*!
* @brief Pins user IO pages that have been mapped to the user processes virtual
* address space with remap_pfn_range.
*
* @param[in] vma VMA that contains the virtual address range given by the
* start and the page count.
* @param[in] start Beginning of the virtual address range of the IO pages.
* @param[in] page_count Number of pages to pin from start.
* @param[in,out] page_array Storage array for pointers to the pinned pages.
* Must be large enough to contain at least page_count
* pointers.
*
* @return NV_OK if the pages were pinned successfully, error otherwise.
*/
/*!
 * @brief Pins user IO pages that have been mapped to the user processes virtual
 * address space with remap_pfn_range.
 *
 * On any failure, every page pinned so far is released again, so the caller
 * never has to unwind a partial pin.
 *
 * @param[in] vma VMA that contains the virtual address range given by the
 * start and the page count.
 * @param[in] start Beginning of the virtual address range of the IO pages.
 * @param[in] page_count Number of pages to pin from start.
 * @param[in,out] page_array Storage array for pointers to the pinned pages.
 * Must be large enough to contain at least page_count
 * pointers.
 *
 * @return NV_OK if the pages were pinned successfully, error otherwise.
 */
static NV_STATUS get_io_pages(struct vm_area_struct *vma,
                              NvUPtr start,
                              NvU64 page_count,
                              struct page **page_array)
{
    NvU64 idx;
    unsigned long pfn;

    for (idx = 0; idx < page_count; idx++)
    {
        if ((nv_follow_pfn(vma, start + (idx * PAGE_SIZE), &pfn) < 0) ||
            !pfn_valid(pfn))
        {
            goto unwind;
        }

        // Page-backed memory mapped to userspace with remap_pfn_range
        page_array[idx] = pfn_to_page(pfn);
        get_page(page_array[idx]);
    }

    return NV_OK;

unwind:
    // Release the pages pinned before the failure (indices 0 .. idx-1).
    while (idx-- > 0)
        put_page(page_array[idx]);
    return NV_ERR_INVALID_ADDRESS;
}
/*!
 * @brief Looks up the memory backing a user virtual IO address range and
 *        returns, in a newly allocated array, either pinned pages
 *        (page-backed mapping) or PFN-derived addresses (pure IO mapping).
 *
 * The range must lie within a single VMA that has VM_IO or VM_PFNMAP set.
 * The first PFN classifies the mapping: if pfn_valid(), the pages are
 * pinned and *page_array is set; otherwise *pte_array is set. Exactly one
 * of the two output arrays is populated on success; the caller owns the
 * array and must release it with os_free_mem.
 */
NV_STATUS NV_API_CALL os_lookup_user_io_memory(
    void *address,
    NvU64 page_count,
    NvU64 **pte_array,
    void **page_array
)
{
    NV_STATUS rmStatus;
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    unsigned long pfn;
    NvUPtr start = (NvUPtr)address;
    void **result_array;

    // Memory allocation and the mmap lock below may sleep; reject atomic context.
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: %s(): invalid context!\n", __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    // One NvP64 slot per page; the array doubles as page- or PTE-storage.
    rmStatus = os_alloc_mem((void **)&result_array, (page_count * sizeof(NvP64)));
    if (rmStatus != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: failed to allocate page table!\n");
        return rmStatus;
    }

    nv_mmap_read_lock(mm);

    // find the first VMA which intersects the interval start_addr..end_addr-1,
    vma = find_vma_intersection(mm, start, start+1);

    // Verify that the given address range is contained in a single vma
    if ((vma == NULL) || ((vma->vm_flags & (VM_IO | VM_PFNMAP)) == 0) ||
        !((vma->vm_start <= start) &&
          ((vma->vm_end - start) >> PAGE_SHIFT >= page_count)))
    {
        nv_printf(NV_DBG_ERRORS,
            "Cannot map memory with base addr 0x%llx and size of 0x%llx pages\n",
            start ,page_count);
        rmStatus = NV_ERR_INVALID_ADDRESS;
        goto done;
    }

    // Probe the first page to classify the mapping as page-backed or pure IO.
    if (nv_follow_pfn(vma, start, &pfn) < 0)
    {
        rmStatus = NV_ERR_INVALID_ADDRESS;
        goto done;
    }

    if (pfn_valid(pfn))
    {
        rmStatus = get_io_pages(vma, start, page_count, (struct page **)result_array);
        if (rmStatus == NV_OK)
            *page_array = (void *)result_array;
    }
    else
    {
        rmStatus = get_io_ptes(vma, start, page_count, (NvU64 **)result_array);
        if (rmStatus == NV_OK)
            *pte_array = (NvU64 *)result_array;
    }

done:
    nv_mmap_read_unlock(mm);
    // On failure nothing was handed back to the caller; drop the array here.
    if (rmStatus != NV_OK)
    {
        os_free_mem(result_array);
    }
    return rmStatus;
}
/*!
 * @brief Pins a user virtual address range and returns the backing pages.
 *
 * Allocates the page pointer array on behalf of the caller; on success the
 * caller owns it (release pages with os_unlock_user_pages). All pages must
 * pin or the call fails and everything is released.
 *
 * @param[in]  address     Base user virtual address of the range.
 * @param[in]  page_count  Number of pages to pin.
 * @param[out] page_array  Receives the allocated struct page pointer array.
 * @param[in]  flags       NV_LOCK_USER_PAGES flags; _WRITE selects FOLL_WRITE.
 */
NV_STATUS NV_API_CALL os_lock_user_pages(
    void   *address,
    NvU64   page_count,
    void  **page_array,
    NvU32   flags
)
{
    NV_STATUS status;
    struct mm_struct *mm = current->mm;
    struct page **pages;
    unsigned int gup_flags;
    int ret;

    // Pinning may fault pages in and must be able to sleep.
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: %s(): invalid context!\n", __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    gup_flags = DRF_VAL(_LOCK_USER_PAGES, _FLAGS, _WRITE, flags) ? FOLL_WRITE : 0;

    status = os_alloc_mem((void **)&pages, page_count * sizeof(*pages));
    if (status != NV_OK)
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: failed to allocate page table!\n");
        return status;
    }

    nv_mmap_read_lock(mm);
    ret = NV_PIN_USER_PAGES((unsigned long)address,
                            page_count, gup_flags, pages, NULL);
    nv_mmap_read_unlock(mm);

    // Anything short of a full pin is a failure: unpin whatever stuck.
    if ((ret < 0) || ((NvU64)ret < page_count))
    {
        NvU64 i;
        for (i = 0; (ret > 0) && (i < (NvU64)ret); i++)
            NV_UNPIN_USER_PAGE(pages[i]);
        os_free_mem(pages);
        return NV_ERR_INVALID_ADDRESS;
    }

    *page_array = pages;
    return NV_OK;
}
/*!
 * @brief Releases pages previously pinned by os_lock_user_pages and frees
 *        the page pointer array.
 *
 * @param[in] page_count  Number of pages in the array.
 * @param[in] page_array  struct page pointer array from os_lock_user_pages;
 *                        freed by this call.
 *
 * @return NV_OK always.
 */
NV_STATUS NV_API_CALL os_unlock_user_pages(
    NvU64 page_count,
    void *page_array
)
{
    struct page **user_pages = page_array;
    // Bug fix: the index must be NvU64 to match page_count; the previous
    // NvU32 index would wrap for page_count >= 2^32 and never terminate.
    NvU64 i;

    for (i = 0; i < page_count; i++)
    {
        // The pages were pinned for DMA; mark them dirty before unpinning
        // so modified contents are not discarded by the kernel.
        // (The original code gated this on a local flag that was always 1.)
        set_page_dirty_lock(user_pages[i]);
        NV_UNPIN_USER_PAGE(user_pages[i]);
    }

    os_free_mem(user_pages);

    return NV_OK;
}

View File

@@ -0,0 +1,206 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*
 * Look up the PCI device at domain:bus:slot.function and return it as an
 * opaque handle for the os_pci_* accessors. Optionally reports the vendor
 * and device IDs through the out parameters. Returns NULL if the device
 * is absent or the caller is in atomic context.
 */
void* NV_API_CALL os_pci_init_handle(
    NvU32 domain,
    NvU8 bus,
    NvU8 slot,
    NvU8 function,
    NvU16 *vendor,
    NvU16 *device
)
{
    struct pci_dev *pdev;
    unsigned int devfn = PCI_DEVFN(slot, function);

    // The PCI lookup may sleep.
    if (!NV_MAY_SLEEP())
        return NULL;

    pdev = NV_GET_DOMAIN_BUS_AND_SLOT(domain, bus, devfn);
    if (pdev == NULL)
        return NULL;

    if (vendor != NULL)
        *vendor = pdev->vendor;
    if (device != NULL)
        *device = pdev->device;

    pci_dev_put(pdev); /* TODO: Fix me! (hotplug) */
    return (void *)pdev;
}
/*
 * Read one byte from the device's PCI configuration space.
 * Out-of-range offsets return 0xff (the all-ones pattern of a failed
 * config read) and NV_ERR_NOT_SUPPORTED.
 */
NV_STATUS NV_API_CALL os_pci_read_byte(
    void *handle,
    NvU32 offset,
    NvU8 *pReturnValue
)
{
    struct pci_dev *pdev = handle;

    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
    {
        *pReturnValue = 0xff;
        return NV_ERR_NOT_SUPPORTED;
    }

    pci_read_config_byte(pdev, offset, pReturnValue);
    return NV_OK;
}
/*
 * Read one 16-bit word from the device's PCI configuration space.
 * Out-of-range offsets return 0xffff and NV_ERR_NOT_SUPPORTED.
 */
NV_STATUS NV_API_CALL os_pci_read_word(
    void *handle,
    NvU32 offset,
    NvU16 *pReturnValue
)
{
    struct pci_dev *pdev = handle;

    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
    {
        *pReturnValue = 0xffff;
        return NV_ERR_NOT_SUPPORTED;
    }

    pci_read_config_word(pdev, offset, pReturnValue);
    return NV_OK;
}
/*
 * Read one 32-bit dword from the device's PCI configuration space.
 * Out-of-range offsets return 0xffffffff and NV_ERR_NOT_SUPPORTED.
 */
NV_STATUS NV_API_CALL os_pci_read_dword(
    void *handle,
    NvU32 offset,
    NvU32 *pReturnValue
)
{
    struct pci_dev *pdev = handle;

    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
    {
        *pReturnValue = 0xffffffff;
        return NV_ERR_NOT_SUPPORTED;
    }

    pci_read_config_dword(pdev, offset, pReturnValue);
    return NV_OK;
}
/*
 * Write one byte to the device's PCI configuration space.
 * Out-of-range offsets are rejected with NV_ERR_NOT_SUPPORTED.
 */
NV_STATUS NV_API_CALL os_pci_write_byte(
    void *handle,
    NvU32 offset,
    NvU8 value
)
{
    struct pci_dev *pdev = handle;

    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
        return NV_ERR_NOT_SUPPORTED;

    pci_write_config_byte(pdev, offset, value);
    return NV_OK;
}
/*
 * Write one 16-bit word to the device's PCI configuration space.
 * Out-of-range offsets are rejected with NV_ERR_NOT_SUPPORTED.
 */
NV_STATUS NV_API_CALL os_pci_write_word(
    void *handle,
    NvU32 offset,
    NvU16 value
)
{
    struct pci_dev *pdev = handle;

    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
        return NV_ERR_NOT_SUPPORTED;

    pci_write_config_word(pdev, offset, value);
    return NV_OK;
}
/*
 * Write one 32-bit dword to the device's PCI configuration space.
 * Out-of-range offsets are rejected with NV_ERR_NOT_SUPPORTED.
 */
NV_STATUS NV_API_CALL os_pci_write_dword(
    void *handle,
    NvU32 offset,
    NvU32 value
)
{
    struct pci_dev *pdev = handle;

    if (offset >= NV_PCIE_CFG_MAX_OFFSET)
        return NV_ERR_NOT_SUPPORTED;

    pci_write_config_dword(pdev, offset, value);
    return NV_OK;
}
/*
 * Report whether this kernel provides an API to remove a PCI device from
 * the bus (conftest macro NV_PCI_STOP_AND_REMOVE_BUS_DEVICE).
 */
NvBool NV_API_CALL os_pci_remove_supported(void)
{
#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE)
    return NV_TRUE;
#else
    return NV_FALSE;
#endif
}
/*
 * Remove the PCI device identified by 'handle' from the PCI bus.
 * Callers are expected to check os_pci_remove_supported() first; on
 * kernels without the removal API this is a no-op (debug builds log
 * an error and break into the debugger).
 */
void NV_API_CALL os_pci_remove(
    void *handle
)
{
#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE)
    NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(handle);
#elif defined(DEBUG)
    // Reached only if the caller ignored os_pci_remove_supported().
    nv_printf(NV_DBG_ERRORS,
        "NVRM: %s() is called even though NV_PCI_STOP_AND_REMOVE_BUS_DEVICE is not defined\n",
        __FUNCTION__);
    os_dbg_breakpoint();
#endif
}

View File

@@ -0,0 +1,336 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2000-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#define NV_DEFINE_REGISTRY_KEY_TABLE
#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-gpu-info.h"
/*!
 * @brief This function parses the PCI BDF identifier string and returns the
 * Domain, Bus, Device and function components from the PCI BDF string.
 *
 * This parser is highly adaptable and hence allows PCI BDF string in following
 * 3 formats.
 *
 * 1) bus:slot           : Domain and function defaults to 0.
 * 2) domain:bus:slot    : Function defaults to 0.
 * 3) domain:bus:slot.func : Complete PCI dev id string.
 *
 * All numeric fields are parsed as hexadecimal.
 *
 * @param[in]  pci_dev_str String containing the BDF to be parsed.
 * @param[out] pci_domain  Pointer where pci_domain is to be returned.
 * @param[out] pci_bus     Pointer where pci_bus is to be returned.
 * @param[out] pci_slot    Pointer where pci_slot is to be returned.
 * @param[out] pci_func    Pointer where pci_func is to be returned.
 *
 * @return NV_OK if parsing succeeds, NV_ERR_INVALID_ARGUMENT on a malformed
 *         string or NULL output pointer, NV_ERR_GENERIC on allocation failure.
 */
static NV_STATUS pci_str_to_bdf(char *pci_dev_str, NvU32 *pci_domain,
    NvU32 *pci_bus, NvU32 *pci_slot, NvU32 *pci_func)
{
    char *option_string = NULL;
    char *token, *string;
    NvU32 domain, bus, slot;
    NV_STATUS status = NV_OK;

    //
    // remove_spaces() allocates memory, hence we need to keep a pointer
    // to the original string for freeing at end of function.
    //
    if ((option_string = rm_remove_spaces(pci_dev_str)) == NULL)
    {
        // memory allocation failed, returning
        return NV_ERR_GENERIC;
    }

    string = option_string;

    // Reject an empty BDF string and NULL output pointers up front.
    if (!strlen(string) || !pci_domain || !pci_bus || !pci_slot || !pci_func)
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto done;
    }

    // Split off the optional ".func" suffix: token holds everything before
    // the '.', string points at the function digit (or NULL if no '.').
    if ((token = strsep(&string, ".")) != NULL)
    {
        // PCI device can have maximum 8 functions only.
        if ((string != NULL) && (!(*string >= '0' && *string <= '7') ||
            (strlen(string) > 1)))
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: Invalid PCI function in token %s\n",
                pci_dev_str);
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }
        else if (string == NULL)
        {
            // No '.' present: function defaults to 0.
            *pci_func = 0;
        }
        else
        {
            *pci_func = (NvU32)(*string - '0');
        }

        // First hex field; a ':' must follow with at least one more char.
        // (In the two-field form this value is actually the bus; see below.)
        domain = simple_strtoul(token, &string, 16);
        if ((string == NULL) || (*string != ':') || (*(string + 1) == '\0'))
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: Invalid PCI domain/bus in token %s\n",
                pci_dev_str);
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }

        // Second hex field, parsed just past the ':'.
        token = string;
        bus = simple_strtoul((token + 1), &string, 16);
        if (string == NULL)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: Invalid PCI bus/slot in token %s\n",
                pci_dev_str);
            status = NV_ERR_INVALID_ARGUMENT;
            goto done;
        }

        if (*string != '\0')
        {
            // Three-field form "domain:bus:slot": a second ':' must follow
            // with a non-empty slot field.
            if ((*string != ':') || (*(string + 1) == '\0'))
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: Invalid PCI slot in token %s\n",
                    pci_dev_str);
                status = NV_ERR_INVALID_ARGUMENT;
                goto done;
            }
            token = string;
            slot = (NvU32)simple_strtoul(token + 1, &string, 16);
            // A zero slot with no characters consumed means the slot field
            // was not a hex number at all.
            if ((slot == 0) && ((token + 1) == string))
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: Invalid PCI slot in token %s\n",
                    pci_dev_str);
                status = NV_ERR_INVALID_ARGUMENT;
                goto done;
            }
            *pci_domain = domain;
            *pci_bus = bus;
            *pci_slot = slot;
        }
        else
        {
            // Two-field form "bus:slot": the fields parsed as domain/bus
            // were really bus/slot; domain defaults to 0.
            *pci_slot = bus;
            *pci_bus = domain;
            *pci_domain = 0;
        }
        status = NV_OK;
    }
    else
    {
        status = NV_ERR_INVALID_ARGUMENT;
    }

done:
    // Freeing the memory allocated by remove_spaces().
    os_free_mem(option_string);
    return status;
}
/*!
 * @brief This function parses the registry keys per GPU device. It accepts a
 * semicolon separated list of key=value pairs. The first key value pair MUST be
 * "pci=DDDD:BB:DD.F;" where DDDD is Domain, BB is Bus Id, DD is device slot
 * number and F is the Function. This PCI BDF is used to identify which GPU to
 * assign the registry keys that follows next.
 * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT
 * found, then all the registry keys that follows are skipped, until we find next
 * valid pci identified "pci=DDDD:BB:DD.F;". Following are the valid formats for
 * the value of the "pci" string:
 * 1) bus:slot           : Domain and function defaults to 0.
 * 2) domain:bus:slot    : Function defaults to 0.
 * 3) domain:bus:slot.func : Complete PCI dev id string.
 *
 * @param[in] sp pointer to nvidia_stack_t struct.
 *
 * @return NV_OK if succeeds, or NV_STATUS error code otherwise.
 */
NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp)
{
    NV_STATUS status = NV_OK;
    char *option_string = NULL;
    char *ptr, *token;
    char *name, *value;
    NvU32 data, domain, bus, slot, func;
    nv_linux_state_t *nvl = NULL;
    nv_state_t *nv = NULL;     // GPU currently selected by the last "pci=" key

    if (NVreg_RegistryDwordsPerDevice != NULL)
    {
        // rm_remove_spaces() allocates; option_string must be freed below.
        if ((option_string = rm_remove_spaces(NVreg_RegistryDwordsPerDevice)) == NULL)
        {
            return NV_ERR_GENERIC;
        }
        ptr = option_string;

        // Walk the ';'-separated entries; each must be exactly "name=value".
        while ((token = strsep(&ptr, ";")) != NULL)
        {
            if (!(name = strsep(&token, "=")) || !strlen(name))
            {
                continue;
            }
            if (!(value = strsep(&token, "=")) || !strlen(value))
            {
                continue;
            }
            // More than one '=' in the entry: malformed, skip it.
            if (strsep(&token, "=") != NULL)
            {
                continue;
            }

            // If this key is "pci", then value is pci_dev id string
            // which needs special parsing as it is NOT a dword.
            if (strcmp(name, NV_REG_PCI_DEVICE_BDF) == 0)
            {
                status = pci_str_to_bdf(value, &domain, &bus, &slot, &func);

                // Check if PCI_DEV id string was in a valid format or NOT.
                if (NV_OK != status)
                {
                    // lets reset cached pci dev
                    nv = NULL;
                }
                else
                {
                    nvl = find_pci(domain, bus, slot, func);

                    //
                    // If NO GPU found corresponding to this GPU, then reset
                    // cached state. This helps ignore the following registry
                    // keys until valid PCI BDF is found in the commandline.
                    //
                    if (!nvl)
                    {
                        nv = NULL;
                    }
                    else
                    {
                        nv = NV_STATE_PTR(nvl);
                    }
                }
                continue;
            }

            //
            // Check if cached pci_dev string in the commandline is in valid
            // format, else we will skip all the successive registry entries
            // (<key, value> pairs) until a valid PCI_DEV string is encountered
            // in the commandline.
            //
            if (!nv)
                continue;

            // Ordinary dword key: parse the value and store it for the
            // currently selected GPU.
            data = (NvU32)simple_strtoul(value, NULL, 0);
            rm_write_registry_dword(sp, nv, name, data);
        }
        os_free_mem(option_string);
    }
    return status;
}
/*
 * Test whether the given UUID string appears in the GPU exclusion list
 * supplied via the ExcludedGpus (preferred) or GpuBlacklist registry
 * parameter. The list is a comma-separated set of UUID strings.
 *
 * Returns NV_TRUE if the UUID is excluded, NV_FALSE otherwise (including
 * when no exclusion list is configured or allocation fails).
 */
NvBool nv_is_uuid_in_gpu_exclusion_list(const char *uuid)
{
    const char *source;
    char *list;
    char *cursor;
    char *entry;
    NvBool found = NV_FALSE;

    //
    // When both NVreg_GpuBlacklist and NVreg_ExcludedGpus are defined
    // NVreg_ExcludedGpus takes precedence.
    //
    if (NVreg_ExcludedGpus != NULL)
        source = NVreg_ExcludedGpus;
    else if (NVreg_GpuBlacklist != NULL)
        source = NVreg_GpuBlacklist;
    else
        return NV_FALSE;

    // rm_remove_spaces() allocates a stripped copy; freed before returning.
    if ((list = rm_remove_spaces(source)) == NULL)
        return NV_FALSE;

    cursor = list;
    while (!found && (entry = strsep(&cursor, ",")) != NULL)
    {
        found = (strcmp(entry, uuid) == 0) ? NV_TRUE : NV_FALSE;
    }

    os_free_mem(list);
    return found;
}
/*
 * Push the module-parameter registry settings into the resource manager:
 * the RmMsg string (if set), the RegistryDwords option string, and every
 * entry of the static nv_parms table.
 *
 * Returns NV_OK on success, NV_ERR_NO_MEMORY if a stack cannot be allocated.
 */
NV_STATUS NV_API_CALL os_registry_init(void)
{
    nvidia_stack_t *sp = NULL;
    unsigned int idx;

    if (nv_kmem_cache_alloc_stack(&sp) != 0)
    {
        return NV_ERR_NO_MEMORY;
    }

    if (NVreg_RmMsg != NULL)
    {
        rm_write_registry_string(sp, NULL, "RmMsg",
                                 NVreg_RmMsg, strlen(NVreg_RmMsg));
    }

    rm_parse_option_string(sp, NVreg_RegistryDwords);

    // The nv_parms table is terminated by an entry with a NULL name.
    for (idx = 0; nv_parms[idx].name != NULL; idx++)
    {
        rm_write_registry_dword(sp, NULL, nv_parms[idx].name,
                                *nv_parms[idx].data);
    }

    nv_kmem_cache_free_stack(sp);
    return NV_OK;
}

View File

@@ -0,0 +1,78 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define __NO_VERSION__
#include "os-interface.h"
#include "nv-linux.h"
/*
 * "Map" a user-space range for RM use. On this platform the user virtual
 * address is usable directly, so the start address is simply returned as
 * the mapping; size_bytes, mode, protect and priv_data are unused.
 */
void* NV_API_CALL os_map_user_space(
    NvU64 start,
    NvU64 size_bytes,
    NvU32 mode,
    NvU32 protect,
    void **priv_data
)
{
    NvUPtr user_va = (NvUPtr)start;
    return (void *)user_va;
}
/*
 * Tear down a mapping produced by os_map_user_space(). Since that routine
 * performs no actual mapping, there is nothing to undo here.
 */
void NV_API_CALL os_unmap_user_space(
    void *address,
    NvU64 size,
    void *priv_data
)
{
}
/*
 * Locate the page of an allocation whose physical address matches a given
 * mmap offset.
 *
 * pAllocPrivate - nv_alloc_t describing the allocation.
 * offset        - physical address to look for.
 * pPageIndex    - receives the index of the matching page on success.
 *
 * For contiguous allocations the expected address of page i is computed
 * from the base page; otherwise each page's own physical address is
 * compared. Returns NV_OK on a match, NV_ERR_OBJECT_NOT_FOUND otherwise.
 */
NV_STATUS NV_API_CALL os_match_mmap_offset(
    void *pAllocPrivate,
    NvU64 offset,
    NvU64 *pPageIndex
)
{
    nv_alloc_t *at = pAllocPrivate;
    NvU64 index;

    for (index = 0; index < at->num_pages; index++)
    {
        NvU64 page_addr;

        if (at->flags.contig)
            page_addr = at->page_table[0]->phys_addr + (index * PAGE_SIZE);
        else
            page_addr = at->page_table[index]->phys_addr;

        if (page_addr == offset)
        {
            *pPageIndex = index;
            return NV_OK;
        }
    }

    return NV_ERR_OBJECT_NOT_FOUND;
}

View File

@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RMP2PDEFINES_H_
#define _RMP2PDEFINES_H_

// Page sizes shared by the RM peer-to-peer (P2P) interfaces.
#define NVRM_P2P_PAGESIZE_SMALL_4K   (4 << 10)   // 4 KiB small page
#define NVRM_P2P_PAGESIZE_BIG_64K    (64 << 10)  // 64 KiB big page
#define NVRM_P2P_PAGESIZE_BIG_128K   (128 << 10) // 128 KiB big page

#endif