Compare commits

..

1 Commits

Author SHA1 Message Date
svcmobrel-release
a66f0feff7 Updating prebuilts and/or headers
d13779dbbab1c776db15f462cd46b29f2c0f8c7c - Makefile
7d577fdb9594ae572ff38fdda682a4796ab832ca - COPYING
5728867ce2e96b63b29367be6aa1c0e47bcafc8f - SECURITY.md
6b73bf6a534ddc0f64e8ba88739381c3b7fb4b5c - nv-compiler.sh
05e911b99b109a721d2045f025b21189e2718e60 - README.md
ec5f1eb408e0b650158e0310fb1ddd8e9b323a6f - CONTRIBUTING.md
af3ee56442f16029cb9b13537477c384226b22fc - CODE_OF_CONDUCT.md
07bd07999f296d935386a8edf719d0e296f63227 - kernel-open/Kbuild
45b68e3eacda04dcadce48a8238574302a71a3ca - kernel-open/Makefile
99f4563141af1278f13cb23a6e6c24d21d583d7b - kernel-open/conftest.sh
0b1508742a1c5a04b6c3a4be1b48b506f4180848 - kernel-open/dkms.conf
19a5da412ce1557b721b8550a4a80196f6162ba6 - kernel-open/common/inc/os_dsi_panel_props.h
4750735d6f3b334499c81d499a06a654a052713d - kernel-open/common/inc/nv-caps.h
92de3baafe321dd0dcf8665aae4614d5ac670718 - kernel-open/common/inc/rs_access.h
60ef64c0f15526ae2d786e5cec07f28570f0663b - kernel-open/common/inc/conftest.h
880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - kernel-open/common/inc/nv-pci-types.h
6d2f660ef0942edf664874f260266ec81cd0ff08 - kernel-open/common/inc/nvtypes.h
c45b2faf17ca2a205c56daa11e3cb9d864be2238 - kernel-open/common/inc/nv-modeset-interface.h
5bc7a748c7d3dfa6559ca4f9fe6199e17098ec8f - kernel-open/common/inc/nv-lock.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - kernel-open/common/inc/nvgputypes.h
e4a4f57abb8769d204468b2f5000c81f5ea7c92f - kernel-open/common/inc/nv-procfs.h
8b19b93e958aca626899f035334a4c96f8776eb6 - kernel-open/common/inc/nv.h
ede1f77acb43e28391bceac058e00a7a8d799b0d - kernel-open/common/inc/nvmisc.h
ae374d3e438f8d3b60df8c4602618c58564b73f9 - kernel-open/common/inc/rm-gpu-ops.h
3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - kernel-open/common/inc/nv-firmware-registry.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - kernel-open/common/inc/dce_rm_client_ipc.h
3e8075872e2efa843b74b884ef5098468edc4f18 - kernel-open/common/inc/nvimpshared.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - kernel-open/common/inc/nv_stdarg.h
0e70d16576584082ee4c7f3ff9944f3bd107b1c1 - kernel-open/common/inc/cpuopsys.h
d7ab0ee225361daacd280ff98848851933a10a98 - kernel-open/common/inc/nv-list-helpers.h
b02c378ac0521c380fc2403f0520949f785b1db6 - kernel-open/common/inc/nv-dmabuf.h
a3d1e51c0f4217f1dc4cb0c48aa0eafd054d4e5e - kernel-open/common/inc/nv-procfs-utils.h
81592e5c17bebad04cd11d73672c859baa070329 - kernel-open/common/inc/nv-chardev-numbers.h
61cf8f3fd32142dc402f6802b5d4c9af6c875c35 - kernel-open/common/inc/nv-firmware.h
d5253e7e4abd3ad8d72375260aa80037adcd8973 - kernel-open/common/inc/nv_dpy_id.h
61a9589e4a8ec122e5a6c2258658d493ee747897 - kernel-open/common/inc/nv-platform.h
b986bc6591ba17a74ad81ec4c93347564c6d5165 - kernel-open/common/inc/nvkms-format.h
4f487eccd762f3ca645a685d5c333ff569e7987c - kernel-open/common/inc/nv-kthread-q-os.h
4015c4557ea0790a2bdf5695832c89e31d75aee9 - kernel-open/common/inc/nvlimits.h
143051f69a53db0e7c5d2f846a9c14d666e264b4 - kernel-open/common/inc/nv-kref.h
56f432032bef4683c2801f46bec5065923475fb1 - kernel-open/common/inc/nv-kthread-q.h
b4c5d759f035b540648117b1bff6b1701476a398 - kernel-open/common/inc/nvCpuUuid.h
67a9707c568e167bae4404c7785ed614babb7b82 - kernel-open/common/inc/nv-linux.h
7c7888550b12eeb98128ea9ac771b897327f538e - kernel-open/common/inc/nv-hypervisor.h
f9cb3701681994ff6f32833892d900b0da2b89f6 - kernel-open/common/inc/nv-pgprot.h
b8700a911ac85770bf25d70b9692308af63966bd - kernel-open/common/inc/nvstatuscodes.h
3a5f4f105672921b857fec7f2b577d9d525afe37 - kernel-open/common/inc/nv-timer.h
5cd0b3f9c7f544e9064efc9b5ba4f297e5494315 - kernel-open/common/inc/nv-time.h
7a78f354e0b68f03d6ab566d5b755e299456f361 - kernel-open/common/inc/os_gpio.h
154abd192eb950fecffcca470ee80b27f224fd79 - kernel-open/common/inc/nv-proto.h
2eb11e523a3ecba2dcd68f3146e1e666a44256ae - kernel-open/common/inc/nv-ioctl.h
1328058925b64e97588d670fe70466b31af7c7c1 - kernel-open/common/inc/nv-mm.h
25d89847c11449b329941a26f04aec955cfaf150 - kernel-open/common/inc/nv-pci.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - kernel-open/common/inc/nvstatus.h
d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - kernel-open/common/inc/nv_mig_types.h
b3258444b6a2c2399f5f00c7cac5b470c41caeaa - kernel-open/common/inc/nv-hash.h
4c856c1324060dcb5a9e72e5e82c7a60f6324733 - kernel-open/common/inc/nvkms-kapi.h
44cb5bc2bc87a5c3447bcb61f2ce5aef08c07fa7 - kernel-open/common/inc/nv_uvm_interface.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - kernel-open/common/inc/nv-kernel-interface-api.h
c54c62de441828282db9a4f5b35c2fa5c97d94f1 - kernel-open/common/inc/nvkms-api-types.h
ade7410c1c0572dbed49b4b0d97b87245ca59115 - kernel-open/common/inc/os-interface.h
2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - kernel-open/common/inc/nv-ioctl-numa.h
995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/common/inc/nv_common_utils.h
c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - kernel-open/common/inc/nv-memdbg.h
dfd7b82a7f2939d4c1869840059705c6b71bffe3 - kernel-open/common/inc/nv-msi.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - kernel-open/common/inc/nvi2c.h
894ef9e230604572bbceabdfd5f241059d54aa10 - kernel-open/common/inc/nv_speculation_barrier.h
107d1ecb8a128044260915ea259b1e64de3defea - kernel-open/common/inc/nv-ioctl-numbers.h
19cfcbf5a3021aa9aaa0ceacbb6711e7f7a6e09e - kernel-open/common/inc/nv_uvm_user_types.h
cfcd2ef5eaec92f8e4647fff02a3b7e16473cbff - kernel-open/common/inc/nv_uvm_types.h
b642fb649ce2ba17f37c8aa73f61b38f99a74986 - kernel-open/common/inc/nv-retpoline.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - kernel-open/common/inc/nv-gpu-info.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - kernel-open/common/inc/os/nv_memory_type.h
70b67003fda6bdb8a01fa1e41c3b0e25136a856c - kernel-open/common/inc/os/nv_memory_area.h
11b09260232a88aa1f73f109fdfab491a7b73576 - kernel-open/nvidia/nv-nano-timer.c
dcf4427b83cce7737f2b784d410291bf7a9612dc - kernel-open/nvidia/nv-reg.h
0b8ff957fb14f20ba86f61e556d1ab15bf5acd74 - kernel-open/nvidia/nv-imp.c
6b09b5ef8a37f78c8e82074b06b40ef593c81807 - kernel-open/nvidia/libspdm_rsa.c
b8d361216db85fe897cbced2a9600507b7708c61 - kernel-open/nvidia/libspdm_hkdf_sha.c
66e2bfc490fb77e0b72a8192b719d3dc74d25d59 - kernel-open/nvidia/nv-pat.c
26a30f2d26c2a97a6e2ee457d97d32f48b0bf25b - kernel-open/nvidia/nv-vm.c
b8a770cea0629c57d8b0b3d7414d7b0f043ee8cf - kernel-open/nvidia/libspdm_ecc.c
4c183eb39251cd78d90868ec6f75ebc7a37e6644 - kernel-open/nvidia/os-usermap.c
8c30b6230439edcbec62636cc93be512bca8637f - kernel-open/nvidia/nv-usermap.c
7af675f85642229b7e7de05dcadd622550fe7ad7 - kernel-open/nvidia/nv-vtophys.c
d11ab03a617b29efcf00f85e24ebce60f91cf82c - kernel-open/nvidia/nv-backlight.c
ef8fd76c55625aeaa71c9b789c4cf519ef6116b2 - kernel-open/nvidia/libspdm_hkdf.c
1590794925ebd9cbc14aae8c47e0cc205a3f4b52 - kernel-open/nvidia/nv-rsync.h
934a686ba8d7b77cce2d928cb3b04f611d9f9187 - kernel-open/nvidia/libspdm_aead.c
f16e6a33b5004566333fb8b99504a0fb95d51226 - kernel-open/nvidia/nv-gpio.c
8ed2c3b93eeaa52342d944e794180fd5d386688a - kernel-open/nvidia/libspdm_rsa_ext.c
2e5d18118835c19c5ca7edee9bceeae613b9d7f9 - kernel-open/nvidia/nv-procfs.c
3e820e66f556be10c0d9728d4187e43c30658736 - kernel-open/nvidia/nv.c
65fe797fb5d4af2db67544ddb79d49ab1b7ca859 - kernel-open/nvidia/nv-dsi-parse-panel-props.c
e3efae4ed920545062a2d06064df8be1a2a42135 - kernel-open/nvidia/nv-caps-imex.h
8c64e75aaaa9ac6f17aae7ed62db23eb2e5b9953 - kernel-open/nvidia/nv_uvm_interface.c
4563589496a93a2720e25807ca1be2565f03554c - kernel-open/nvidia/nv-bpmp.c
aea97021d9aa023a357f009fcddc710f710ceb5e - kernel-open/nvidia/libspdm_x509.c
f29e5bc1c7bd2c670780cdbb7275900a69f4d205 - kernel-open/nvidia/internal_crypt_lib.h
13dc24fb41516c777328d4db64fa39a9e2c40191 - kernel-open/nvidia/nv-modeset-interface.c
6ae527b69eebb44224b05e8cb3546757532d8a16 - kernel-open/nvidia/nv-dma.c
fe204e3820d206b5b0c34a51084f39b97310305a - kernel-open/nvidia/nv-ipc-soc.c
60d6ff5becc0ddbcf4b489b9d88c1dec8ccc67be - kernel-open/nvidia/nv-platform-pm.c
c1f7c81018a414b7a657431b115a1b86d3ebe3e7 - kernel-open/nvidia/os-mlock.c
c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia/nv-kthread-q.c
70bece14e12b9ffc92816ee8159a4ce596579d78 - kernel-open/nvidia/os-pci.c
a677049bb56fa5ebe22fe43b0c4a12acd58a6677 - kernel-open/nvidia/nv-p2p.c
e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia/nv-pci-table.c
415b8f457c01417f32c998ae310b5a42dd5805cb - kernel-open/nvidia/nv-pci.c
6dfc57ac42bed97c6ff81d82e493f05b369e0b84 - kernel-open/nvidia/nvspdm_cryptlib_extensions.h
bba706cfbc04b3a880b5e661066f92e765fad663 - kernel-open/nvidia/nv-caps-imex.c
ed3c83f62e4ccc4b53d886eedd4b47518a361393 - kernel-open/nvidia/nv-dmabuf.c
66b7fad4d73a23153298ce777afb14d2c8be42c1 - kernel-open/nvidia/libspdm_internal_crypt_lib.c
6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - kernel-open/nvidia/rmp2pdefines.h
b71bf4426322ab59e78e2a1500509a5f4b2b71ab - kernel-open/nvidia/nv-pat.h
9a5a58bd6eb71a4c32e334a1a4e3326a17143cce - kernel-open/nvidia/os-interface.c
1a91f5e6d517856303da448bea80d167b238e41c - kernel-open/nvidia/nv-i2c.c
7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia/nv-pci-table.h
c50865d3070a0c3476ce24ff1ab4cc4e3f9ea4be - kernel-open/nvidia/detect-self-hosted.h
7ae9a57b9e99fd2a3534798e52e57f7784738a53 - kernel-open/nvidia/nv-report-err.c
3b27e4eaa97bd6fa71f1a075b50af69b1ec16454 - kernel-open/nvidia/libspdm_ec.c
dd9e367cba9e0672c998ec6d570be38084a365ab - kernel-open/nvidia/libspdm_rand.c
d8b8077adb7fd70eb9528d421bdef98c4378b57a - kernel-open/nvidia/nv-msi.c
1cabb1e7fa825216c09f9d2f103657b0ac2dc85a - kernel-open/nvidia/nv-platform.c
dd819a875c584bc469082fcf519779ea00b1d952 - kernel-open/nvidia/libspdm_aead_aes_gcm.c
74958745f83b14c04aaa60248bf5c86ceef6b5cb - kernel-open/nvidia/nv-acpi.c
4d19a1756af848d25fd2fd8cc691dcbcf0afb776 - kernel-open/nvidia/os-registry.c
80f9ac558a57c60cbf70f3ecaf73c71e60c98885 - kernel-open/nvidia/nv-rsync.c
7f5d251db1db4a179a67efea0178fbfda94f95d0 - kernel-open/nvidia/nv_gpu_ops.h
642c3a7d10b263ab9a63073f83ad843566927b58 - kernel-open/nvidia/libspdm_hmac_sha.c
7d53c2d27580d1b2cc56246d9972f3f310a3cd34 - kernel-open/nvidia/nv-clk.c
0f28ebcdb723e836c923e40642429838fa9e86dc - kernel-open/nvidia/nvidia-sources.Kbuild
99540efd2dfa6907b84e628e12370eefb0222850 - kernel-open/nvidia/nv-mmap.c
11ac7a3a3b4def7fa31a289f5f8461ad90eca06b - kernel-open/nvidia/nv-tracepoint.h
a14b9115cff1e5e7491737083588a5646c8c227b - kernel-open/nvidia/nv-report-err.h
011f975d4f94f7b734efa23d3c8075321eaaf0e8 - kernel-open/nvidia/nv-memdbg.c
1ba353673c266cb47ebcd07707e8ce125353e751 - kernel-open/nvidia/nvidia.Kbuild
ac976b92e83f19125d6b3f7e95d9523e430b9b09 - kernel-open/nvidia/nv-p2p.h
9b036018501d9b8543aabe7ec35dbe33023bb3e0 - kernel-open/nvidia/nv-host1x.c
11778961efc78ef488be5387fa3de0c1b761c0d9 - kernel-open/nvidia/libspdm_sha.c
02b1936dd9a9e30141245209d79b8304b7f12eb9 - kernel-open/nvidia/nv-cray.c
2d61ad39b2356c9cfd8d57c1842e80a20272e37f - kernel-open/nvidia/nv-caps.c
fc199c04b321db79ab5446574d9b994f8bfe6c24 - kernel-open/nvidia/libspdm_shash.c
fa178a7209f56008e67b553a2c5ad1b2dd383aac - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h
34de62da6f880ba8022299c77eddbb11d7fc68d2 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h
8af43a3f0e4201aa6ff0099221a371fb1801e818 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h
cf94004b7b5729982806f7d6ef7cc6db53e3de56 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h
9a6e164ec60c2feb1eb8782e3028afbffe420927 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h
4991dfa8852edbdd1ffbd2d44f7b6ac4e1c8c752 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h
7694b027d74d65561ce6cd15a8c0822e4b32b73a - kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h
8b84a0cc1127f39652362007e048ea568c9cf80b - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h
2d7b566655ba8a05fae4ea4f6c806b75d7ebb5f3 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h
0dcb1fd3982e6307b07c917cb453cddbcd1d2f43 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h
7ff12b437215b77c920a845943e4101dcde289c4 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h
16dd525c52448a32cc8da75d6a644d8a35efbfee - kernel-open/nvidia/library/spdm_lib_config.h
53a9acf65cad6bc4869a15d8086990365c987456 - kernel-open/nvidia/library/cryptlib.h
cfbaebb1091f7b1a8d2e3c54c2301ac45ade6c40 - kernel-open/nvidia/internal/libspdm_lib_config.h
2ea094687fbee1e116cd0362cbeba7592439e0b6 - kernel-open/nvidia-drm/nvidia-drm-crtc.h
bed7b5053d09473188061b0d7f6a3a65b64f72e0 - kernel-open/nvidia-drm/nvidia-drm-linux.c
0f8e4535cf97fadea23c9848483355583f492131 - kernel-open/nvidia-drm/nvidia-drm-utils.c
35034b6f174cd6a14b7d94a07f777794570959b4 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h
072e1d6a260e348dada181162949eee190321ed8 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c
e86dac2985f4e61f4e2676b3290e47cdcb951c46 - kernel-open/nvidia-drm/nvidia-drm-modeset.c
f00a605cac7ffc7f309e3952c5d4cea7cbfc0b7e - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - kernel-open/nvidia-drm/nvidia-drm.h
763833186eabf1a0501434426c18161febf624d4 - kernel-open/nvidia-drm/nvidia-drm-fb.h
4bada3ff7bfee8b7e222fc4cafb2ac97c67d7898 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h
99a2e922a448b4d76318ec151378c8bbf5971595 - kernel-open/nvidia-drm/nvidia-drm-helper.c
ae6efc1bbec8a5e948b7244f4801f0b4b398f203 - kernel-open/nvidia-drm/nvidia-drm.c
94c28482252c983fd97532634ffafea0bf77337a - kernel-open/nvidia-drm/nvidia-drm-ioctl.h
a4f77f8ce94f63f3ca2a970c1935d8da48ab5ccc - kernel-open/nvidia-drm/nvidia-drm-format.c
b78e4f40234f908e722f172485e4466d80b7b501 - kernel-open/nvidia-drm/nvidia-drm-drv.h
4154c5562cebd2747bd15fb302c19cb0cefe1c9c - kernel-open/nvidia-drm/nvidia-drm-connector.h
c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-drm/nv-kthread-q.c
e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia-drm/nv-pci-table.c
47110750cf788e7d9ddb5db85be3658ac660a109 - kernel-open/nvidia-drm/nvidia-drm-fence.h
73a1acab50e65c468cb71b65238a051bc306ae70 - kernel-open/nvidia-drm/nvidia-drm-encoder.h
aa388c0d44060b8586967240927306006531cdb7 - kernel-open/nvidia-drm/nvidia-drm-helper.h
d0b4f4383a7d29be40dd22e36faa96dae12d2364 - kernel-open/nvidia-drm/nvidia-drm-os-interface.h
63a2fec1f2c425e084bdc07ff05bda62ed6b6ff1 - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c
a46422076a6a3e439349fbda4fc46e4add29b8e5 - kernel-open/nvidia-drm/nvidia-drm-drv.c
19031f2eaaaeb0fa1da61681fa6048c3e303848b - kernel-open/nvidia-drm/nvidia-drm-gem.c
71ea2d5b02bf8fb3e8cf6b7c84686e2edbc244d0 - kernel-open/nvidia-drm/nvidia-drm-encoder.c
7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia-drm/nv-pci-table.h
9f57b8724205e03ca66b32fe710cd36b82932528 - kernel-open/nvidia-drm/nvidia-drm-conftest.h
6e9838b169beffe149ba12625acb496504d36d50 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c
d2525a36b7aec71982df80a89b861f220312103d - kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
a505f0aa98ebcf438307f6bacf9bf5a5be189839 - kernel-open/nvidia-drm/nvidia-drm-connector.c
d5518597469dc874ee7e264b9400db51af2fcd44 - kernel-open/nvidia-drm/nvidia-drm-format.h
437d87e7e4bd34ae3c67b27c2faaa394575acf70 - kernel-open/nvidia-drm/nvidia-drm-priv.h
88b2035ddbba8c7f455209e61256b4e7b09c11dd - kernel-open/nvidia-drm/nvidia-drm-fence.c
eff6a0b72274c8824b7a79e9aee261da3a6fb4f1 - kernel-open/nvidia-drm/nvidia-drm-gem.h
6528efa1f8061678b8543c5c0be8761cab860858 - kernel-open/nvidia-drm/nvidia-drm-modeset.h
46a41b0b3470190abcdc57a739238a9cd773812b - kernel-open/nvidia-drm/nvidia-drm.Kbuild
995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/nvidia-drm/nv_common_utils.h
40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - kernel-open/nvidia-drm/nvidia-drm-utils.h
d924c494620760887546f428f87387d8ed5b99a6 - kernel-open/nvidia-drm/nvidia-drm-fb.c
5eb8385042f3efa5c2e14d168cdb40b211467552 - kernel-open/nvidia-drm/nvidia-drm-crtc.c
62a9b9b30fd7417d9ab085b2bfc731aadd9826f9 - kernel-open/nvidia-drm/nvidia-drm-os-interface.c
ca86fee8bd52e6c84e376199c5f3890078bc2031 - kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h
04ea084a5c5d496cc43103d1997053246a2fa94c - kernel-open/nvidia-modeset/nvidia-modeset-linux.c
b2a5ddfd8dcb3000b9d102bd55b5b560730e81d5 - kernel-open/nvidia-modeset/nvkms.h
c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-modeset/nv-kthread-q.c
da6fd16e29300170aba8a652ea6296241f66243b - kernel-open/nvidia-modeset/nvidia-modeset.Kbuild
2ea1436104463c5e3d177e8574c3b4298976d37e - kernel-open/nvidia-modeset/nvkms-ioctl.h
13d4f9648118dd25b790be0d8d72ebaa12cc8d0e - src/common/sdk/nvidia/inc/rs_access.h
579be4859587206460d8729804aab19180fb69bb - src/common/sdk/nvidia/inc/nvtypes.h
993f17e3094243623f793ae16bd84b5fa3f335ec - src/common/sdk/nvidia/inc/g_finn_rm_api.h
a54d77d45f9b0c5ae3fa8b59d2117145260800b6 - src/common/sdk/nvidia/inc/cc_drv.h
b249abc0a7d0c9889008e98cb2f8515a9d310b85 - src/common/sdk/nvidia/inc/nvgputypes.h
78a4b6b19a38de41527ef8b290754deca5906817 - src/common/sdk/nvidia/inc/nvcd.h
ede1f77acb43e28391bceac058e00a7a8d799b0d - src/common/sdk/nvidia/inc/nvmisc.h
46966ed7fc8d85931b49b12683c42666181f33f6 - src/common/sdk/nvidia/inc/nvimpshared.h
befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - src/common/sdk/nvidia/inc/nv_stdarg.h
f5a682339a89d2b119b43e5b9263dd67346ed3bc - src/common/sdk/nvidia/inc/cpuopsys.h
cf1de27d5bcbd0adbe3c3b64466193b7d9094c71 - src/common/sdk/nvidia/inc/nverror.h
4015c4557ea0790a2bdf5695832c89e31d75aee9 - src/common/sdk/nvidia/inc/nvlimits.h
7c7888550b12eeb98128ea9ac771b897327f538e - src/common/sdk/nvidia/inc/nv-hypervisor.h
b8700a911ac85770bf25d70b9692308af63966bd - src/common/sdk/nvidia/inc/nvstatuscodes.h
95bf694a98ba78d5a19e66463b8adda631e6ce4c - src/common/sdk/nvidia/inc/nvstatus.h
a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - src/common/sdk/nvidia/inc/nvcfg_sdk.h
1e7eec6561b04d2d21c3515987aaa116e9401c1f - src/common/sdk/nvidia/inc/nv-kernel-interface-api.h
af0bc90b3ad4767de53b8ff91e246fdab0146e8b - src/common/sdk/nvidia/inc/nvsecurityinfo.h
5cec5038e1f4a395a08b765c8361a9560f3312b7 - src/common/sdk/nvidia/inc/nvdisptypes.h
c8b96af9d498f87cb9acde064648f9e84d789055 - src/common/sdk/nvidia/inc/nv_vgpu_types.h
3b12d770f8592b94a8c7774c372e80ad08c5774c - src/common/sdk/nvidia/inc/nvi2c.h
bbf6c09ef9bb10ab63d337bf011872f9073c3e5b - src/common/sdk/nvidia/inc/nvos.h
9bca638f5832d831880f090c583fac6fc8cf6ee6 - src/common/sdk/nvidia/inc/dpringbuffertypes.h
7de14a0c3cc8460a9c41e1ee32fda5409c5b9988 - src/common/sdk/nvidia/inc/mmu_fmt_types.h
774318ced0fdcb199e99cf0fee9688259dd01a51 - src/common/sdk/nvidia/inc/nvfixedtypes.h
ed51b6e2d454af3da36f9c5f4a8a7958d2c5f156 - src/common/sdk/nvidia/inc/alloc/alloc_channel.h
ffe618524466cbbff64de55d88fd987e198bb8c9 - src/common/sdk/nvidia/inc/class/cl9271.h
cef74c734fc7d2f32ff74095c59212d9e1d4cafc - src/common/sdk/nvidia/inc/class/cl84a0.h
9f8a45cb986e3ad2bd4a8900469fe5f8b0c9463a - src/common/sdk/nvidia/inc/class/cl9870.h
a6bb32861fa3f93ccb16490f0f2751a1ef333eed - src/common/sdk/nvidia/inc/class/cl0101.h
e6818f1728a66a70080e87dac15a6f92dd875b4e - src/common/sdk/nvidia/inc/class/cl927d.h
522682a17bacd5c1d6081c0020d094ee3d5c4a30 - src/common/sdk/nvidia/inc/class/clcb97.h
89d4eeb421fc2be3b9717e333e9ff67bfffa24e8 - src/common/sdk/nvidia/inc/class/cl2080.h
f558fddfdc088b86a1b479542b8e782e42a5bdce - src/common/sdk/nvidia/inc/class/clc37a.h
d301edef2d1dd42382670e5a6ceef0d8caf67d28 - src/common/sdk/nvidia/inc/class/cl90cd.h
1dfae8f11f8e92908f59a1c9493e84ce40d53b90 - src/common/sdk/nvidia/inc/class/cl0070.h
95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - src/common/sdk/nvidia/inc/class/cl90f1.h
99a34eee22f584d5dfb49c3018a8cb9a7b1035ed - src/common/sdk/nvidia/inc/class/cl5070_notification.h
c4f090f0dae5bdebf28c514c1b5a9bd8606aa56c - src/common/sdk/nvidia/inc/class/cl9097.h
4b77798281f3754a80961308d44a70b1a717283b - src/common/sdk/nvidia/inc/class/clc46f.h
bd2a88f8dbc64add00ad366aa3e76d116cb090b3 - src/common/sdk/nvidia/inc/class/cl0073.h
e587a693bc1cee68983a7039ddbf16a3d3461d64 - src/common/sdk/nvidia/inc/class/cl9471.h
ddbffcce44afa7c07924fd64a608f7f3fe608ccc - src/common/sdk/nvidia/inc/class/cl0071.h
74c75472658eea77d031bf3979dd7fe695b4293f - src/common/sdk/nvidia/inc/class/cl0092_callback.h
fd16daebcd23a680b988dde4ae99625434dcb8fa - src/common/sdk/nvidia/inc/class/cl0000.h
c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - src/common/sdk/nvidia/inc/class/cl907dswspare.h
5ca1d01dab6b9e814160ddce868d00aa9a1ead58 - src/common/sdk/nvidia/inc/class/clc873.h
7c7406d40a09372dcae2aaf3fcad225c3dd2cf3f - src/common/sdk/nvidia/inc/class/cl9010_callback.h
2240664ad950c9c2e64b6f4d18e05349bc91443c - src/common/sdk/nvidia/inc/class/clc573.h
593384ce8938ceeec46c782d6869eda3c7b8c274 - src/common/sdk/nvidia/inc/class/cl900e.h
101da471fe4e167815425793491e43193e407d9a - src/common/sdk/nvidia/inc/class/clc397.h
dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - src/common/sdk/nvidia/inc/class/cl0076.h
46f74fc51a7ec532330e966cad032782e80808b8 - src/common/sdk/nvidia/inc/class/clcc7b.h
053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - src/common/sdk/nvidia/inc/class/cl0004.h
71e34a03bcfa70edfbec4dbdeade82a932057938 - src/common/sdk/nvidia/inc/class/clc637.h
447fe99b23c5dbe3d2a7601e8228a1a1831c6705 - src/common/sdk/nvidia/inc/class/clcc70.h
89ed6dd37fca994e18e03a5410d865b88e1ff776 - src/common/sdk/nvidia/inc/class/clc87e.h
03d873c3a0e0376440f23171640d9c517f7a34e9 - src/common/sdk/nvidia/inc/class/cl902d.h
78259dc2a70da76ef222ac2dc460fe3caa32457a - src/common/sdk/nvidia/inc/class/clc37e.h
b7a5b31a8c3606aa98ba823e37e21520b55ba95c - src/common/sdk/nvidia/inc/class/cl402c.h
5ee1adc8d952212b37211c6f4f677ba672f5117c - src/common/sdk/nvidia/inc/class/clcc71.h
bd12f7cdc3a01668b9c486dc6456f9263dd459ea - src/common/sdk/nvidia/inc/class/clc57b.h
4b2f2194a1655cc6ae707866f130bbe357d0c21f - src/common/sdk/nvidia/inc/class/clb097tex.h
5409e5af182ac18ef8d13380bdfe7cf2e83d37d7 - src/common/sdk/nvidia/inc/class/clc37b.h
aeb4cbab8d1d0fbd0a5747fa36d6f56c00234b2d - src/common/sdk/nvidia/inc/class/clc097tex.h
36fd6906e2688dad2e7ab648be7e070b9eb6f11d - src/common/sdk/nvidia/inc/class/clc971.h
513c505274565fa25c5a80f88a7d361ffbcb08c3 - src/common/sdk/nvidia/inc/class/cl0005.h
53e6252cd85a60698c49a721f4e41da1cb14e5bd - src/common/sdk/nvidia/inc/class/clc97dswspare.h
645adeb829dbcf315bf67ff8387e7a5d982d7b6e - src/common/sdk/nvidia/inc/class/cl00de.h
0f91db32d9e346b4d9f3762c9e59a8f8e5fd0903 - src/common/sdk/nvidia/inc/class/clcc7d.h
a24c2a943c7ceceb8d015f5cd02148f8c4e7c23d - src/common/sdk/nvidia/inc/class/clb097.h
691bb932ea3f60d2b9ad3e4d7fa53ab1a2a5e6c5 - src/common/sdk/nvidia/inc/class/clc870.h
758e2fb8b5d89079f03be09d74964e9246cb180c - src/common/sdk/nvidia/inc/class/clc797.h
f4af32374be4d05a2e55c97053a4f0d1f4b85154 - src/common/sdk/nvidia/inc/class/cl0000_notification.h
1e578eb23dacca047e0b342cce3024b3134f8de9 - src/common/sdk/nvidia/inc/class/clc7b5.h
941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - src/common/sdk/nvidia/inc/class/cl917e.h
b23cdfb66f40c6d9a903f602b8ff4526063b5a2d - src/common/sdk/nvidia/inc/class/clc097.h
0de3548dde4e076cbd0446330b2d5ae4862c1501 - src/common/sdk/nvidia/inc/class/clc973.h
ddb996ff90b80c0f58729b9ac89fa6d2d3950e49 - src/common/sdk/nvidia/inc/class/cla16f.h
cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - src/common/sdk/nvidia/inc/class/clc57e.h
9e1d2f90d77e23f1d2163a8f8d8d747058e21947 - src/common/sdk/nvidia/inc/class/cl9010.h
7a14243de2b228f086810f968a1712627f1333fd - src/common/sdk/nvidia/inc/class/clc36f.h
7c8e1f1055f9522cfb2935ea0aae612ef172c26e - src/common/sdk/nvidia/inc/class/clc370_notification.h
64ad2ab88e2006bcdace06e7109981496c39f265 - src/common/sdk/nvidia/inc/class/clc87d.h
36c6162356ac39346c8900b1e0074e4b614d4b5a - src/common/sdk/nvidia/inc/class/clc370.h
5df0ce4eb733554e963eb3c7938396f58f2dd4d5 - src/common/sdk/nvidia/inc/class/cl2081.h
a4d82d12346918edd0a7564a5c6cbfe849532b7f - src/common/sdk/nvidia/inc/class/clca70.h
159b78a13e43a2afe6c17714a6f8619675480346 - src/common/sdk/nvidia/inc/class/clc86f.h
6ddba2e93c046ae04f48685c73f8f2d9fe74a398 - src/common/sdk/nvidia/inc/class/clc67a.h
83c6378ef27c8b640895a123801d27e6c4fd3754 - src/common/sdk/nvidia/inc/class/clc671.h
7f75433a769a020d9f36996c855c8ce6ab39dd83 - src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h
31ac68401e642baf44effb681d42374f42cf86b1 - src/common/sdk/nvidia/inc/class/cl00c3.h
95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - src/common/sdk/nvidia/inc/class/cl0020.h
20d5608c2d6e55efd6d1756a00739f7a05d3a2b3 - src/common/sdk/nvidia/inc/class/clc361.h
9797f4758d534181eeaa6bc88d576de43ba56045 - src/common/sdk/nvidia/inc/class/clc574.h
a39d75d3e479aebaf3849415e156c3cfe427298a - src/common/sdk/nvidia/inc/class/clc771.h
eac86d7180236683b86f980f89ec7ebfe6c85791 - src/common/sdk/nvidia/inc/class/cl957d.h
f7a2fea4725d59e95294c397ede001504b777b0d - src/common/sdk/nvidia/inc/class/clc697.h
f3f33f70ec85c983acec8862ccaabf5b186de2bb - src/common/sdk/nvidia/inc/class/cl9270.h
8b94512c9746c6976c4efeee0291bf44bb5e0152 - src/common/sdk/nvidia/inc/class/clcc73.h
60d0c7923699599a5a4732decfbcb89e1d77b69e - src/common/sdk/nvidia/inc/class/cl9770.h
e0c9a155f829c158c02c21b49c083168f8b00cbe - src/common/sdk/nvidia/inc/class/clc37dswspare.h
499bc681107a2b7ad7af3d2211b582b8fb9d9761 - src/common/sdk/nvidia/inc/class/clcc7a.h
e1bfd0c78f397e7c924c9521f87da8286bebe3f1 - src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h
2f291dc867e71f625c59f72787b9fb391a16d0e6 - src/common/sdk/nvidia/inc/class/clc638.h
8d2dcc086f892dd58270c9e53e747513ed4b2f93 - src/common/sdk/nvidia/inc/class/clb06f.h
3d262347ab41547d9ccc28a892d24c83c6b1158e - src/common/sdk/nvidia/inc/class/cla06f.h
bae36cac0a8d83003ded2305409192995d264d04 - src/common/sdk/nvidia/inc/class/cl0001.h
ba8f5899df4287b8440bcb9c8e09e10db73ebf12 - src/common/sdk/nvidia/inc/class/clc97a.h
7bfcd7cf1735b2a54839e8a734e2227060ebf570 - src/common/sdk/nvidia/inc/class/clc197.h
e231c552afb3a78da7341ee49bf36940f1f65202 - src/common/sdk/nvidia/inc/class/clc77d.h
821396a58944ba4620f43cf6ee833b7a04d67193 - src/common/sdk/nvidia/inc/class/clc970.h
1f1879fcddf3c3f1f6c44df0e51822ad1bfa1aae - src/common/sdk/nvidia/inc/class/cl9171.h
a23967cf3b15eefe0cc37fef5d03dfc716770d85 - src/common/sdk/nvidia/inc/class/clc372sw.h
02ff42b6686954e4571b8a318575372239db623b - src/common/sdk/nvidia/inc/class/cl30f1_notification.h
4be055f206ef1049e8a5b824f9f4830eba0e224c - src/common/sdk/nvidia/inc/class/cla26f.h
ef173136a93cdd2e02ec82d7db05dc223b93c0e1 - src/common/sdk/nvidia/inc/class/clc770.h
a3e011723b5863277a453bfcfb59ce967cee0673 - src/common/sdk/nvidia/inc/class/clc670.h
f33b9fdad6ceb534530fecfd16b40a71f5f5cfdc - src/common/sdk/nvidia/inc/class/clc56f.h
02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - src/common/sdk/nvidia/inc/class/clc57esw.h
aa6387d7ce55a88789c5731e89dedde57115131c - src/common/sdk/nvidia/inc/class/clc97b.h
86ab048c67a075349622c597fa9c4f2a9a3d8635 - src/common/sdk/nvidia/inc/class/cl9571.h
9b2d08d7a37beea802642f807d40413c7f9a8212 - src/common/sdk/nvidia/inc/class/clc37d.h
bd9f406625e6c0cce816a5ddfb9078723e7f7fb5 - src/common/sdk/nvidia/inc/class/clb0b5sw.h
ab27db8414f1400a3f4d9011e83ac49628b4fe91 - src/common/sdk/nvidia/inc/class/cl987d.h
2614a83d383b540f23ef721ec49af1dfde629098 - src/common/sdk/nvidia/inc/class/cl0080.h
9db39be032023bff165cd9d36bee2466617015a5 - src/common/sdk/nvidia/inc/class/cl0002.h
094bec72bfa8c618edc139bc353b20433f1c1da2 - src/common/sdk/nvidia/inc/class/cl2080_notification.h
e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - src/common/sdk/nvidia/inc/class/cl90ec.h
0ad3b3e00dc83a0487bd96abd5fe467213aa51ad - src/common/sdk/nvidia/inc/class/clc597.h
869e41c3ba08d704fcf00541075986de43d6b090 - src/common/sdk/nvidia/inc/class/cl917b.h
b685769b5f3fed613227498866d06cc3c1caca28 - src/common/sdk/nvidia/inc/class/cl2082.h
4c0d054bd0d9935d8d2cedba3f5e910d6b6f8ed3 - src/common/sdk/nvidia/inc/class/clc997.h
1697a9ed528d633a1e78c0071868d7dff899af26 - src/common/sdk/nvidia/inc/class/clc57a.h
8e85d29d4006dbd3a913fcc088be5e8c87bbdabb - src/common/sdk/nvidia/inc/class/cl0100.h
15d1f928a9b3f36065e377e29367577ae92ab065 - src/common/sdk/nvidia/inc/class/cl0080_notification.h
e3bd2cacd357e411bc1b6b7d7660ffa97c3a7ee3 - src/common/sdk/nvidia/inc/class/clb197.h
16f9950a48c4e670b939a89724b547c5be9938bf - src/common/sdk/nvidia/inc/class/clc570.h
060722ac6a529a379375bb399785cbf2380db4fd - src/common/sdk/nvidia/inc/class/clc373.h
bd910ff84b9920af83e706a8ab37c68157a372c8 - src/common/sdk/nvidia/inc/class/clc97e.h
b71d1f698a3e3c4ac9db1f5824db983cf136981a - src/common/sdk/nvidia/inc/class/cl9170.h
2a031d85b85c4b1e5b278f6010ca8f33b2192de1 - src/common/sdk/nvidia/inc/class/cl90e7.h
9ceb4ec8538818c8b1dcc7ffe885584b8e0f435e - src/common/sdk/nvidia/inc/class/cla097.h
a9503a5558b08071f35b11df9a917310947c378b - src/common/sdk/nvidia/inc/class/cl00da.h
d8000ab8ef59e64d17b4089c43953ca69b7f605f - src/common/sdk/nvidia/inc/class/clc67e.h
6400b9ad3460dafe00424e3c1b1b7a05ab865a63 - src/common/sdk/nvidia/inc/class/cl50a0.h
7032fd79731907df00a2fe0bbf6c0f4ce87f021d - src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h
b11e7b13106fd6656d1b8268ffc15700fba58628 - src/common/sdk/nvidia/inc/class/clc371.h
ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - src/common/sdk/nvidia/inc/class/cl003e.h
0285aed652c6aedd392092cdf2c7b28fde13a263 - src/common/sdk/nvidia/inc/class/cl00fc.h
81b4e4432da8412c119e795662819cfe7558711f - src/common/sdk/nvidia/inc/class/cl917a.h
38265d86eb7c771d2d3fc5102d53e6a170a7f560 - src/common/sdk/nvidia/inc/class/cl0041.h
848c89981de73d681615266e4e983b74c2ef418f - src/common/sdk/nvidia/inc/class/cla06fsubch.h
2d76476dba432ffc1292d2d5dd2a84ff3a359568 - src/common/sdk/nvidia/inc/class/cl0092.h
b46b2cfcf72fc2f9722bd42cea8daaeeda861471 - src/common/sdk/nvidia/inc/class/clc871.h
022e8405220e482f83629dd482efee81cc49f665 - src/common/sdk/nvidia/inc/class/clc77f.h
fe7484d17bc643ad61faabee5419ddc81cf9bfd6 - src/common/sdk/nvidia/inc/class/cl9570.h
bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - src/common/sdk/nvidia/inc/class/cl0040.h
6249715d9876f5825ad62f563bf070e93710a2ad - src/common/sdk/nvidia/inc/class/clc67d.h
b1133e9abe15cf7b22c04d9627afa2027e781b81 - src/common/sdk/nvidia/inc/class/cl917c.h
7ef21c4f4fd4032c8f25f8fb33669e692a26e700 - src/common/sdk/nvidia/inc/class/clcb97tex.h
73b706e4916f4c70302387c88c8e14e7b2c1f4e6 - src/common/sdk/nvidia/inc/class/clc67b.h
c40fd87fa6293d483b5bf510e2e331143ded9fa4 - src/common/sdk/nvidia/inc/class/cl9470.h
20894d974d1f8f993c290463f1c97c71fd2e40b1 - src/common/sdk/nvidia/inc/class/cl30f1.h
9f7f04825f3f218cc0c4610938935e2f0a73e13b - src/common/sdk/nvidia/inc/class/clc97d.h
04ab1761d913030cb7485149ecd365f2f9c0f7da - src/common/sdk/nvidia/inc/class/cl0005_notification.h
da8d312d2fdc6012e354df4fa71ed62ae4aac369 - src/common/sdk/nvidia/inc/class/cl927c.h
158c98c8721d558ab64a025e6fdd04ce7a16ba9e - src/common/sdk/nvidia/inc/class/cl947d.h
5416c871e8d50a4e76cbad446030dbedbe1644fd - src/common/sdk/nvidia/inc/class/cl00f2.h
0b35244321b1f2f6647f8389f6fa7254c34790e2 - src/common/sdk/nvidia/inc/class/cl90cdtrace.h
39161706917567f434a6fff736b22f3358923e68 - src/common/sdk/nvidia/inc/class/clc06f.h
bc3674f2384cb3695ce5f035ed16e9c39bba4d1b - src/common/sdk/nvidia/inc/class/cl00fe.h
dd4f75c438d19c27e52f25b36fc8ded1ce02133c - src/common/sdk/nvidia/inc/class/cl917cswspare.h
435a34753d445eb9711c7132d70bd26df2b8bdab - src/common/sdk/nvidia/inc/class/cl917d.h
b31019107ada7b0fb8247c09d93b95a630821fa8 - src/common/sdk/nvidia/inc/class/clcc7e.h
31939808cd46382b1c63bc1e0bd4af953302773f - src/common/sdk/nvidia/inc/class/cl977d.h
83427e3172c64c3b9ef393205ccc3b961ec65190 - src/common/sdk/nvidia/inc/class/cl5070.h
db8dd50ad3e64fe0472d82c0940908d5da5e0321 - src/common/sdk/nvidia/inc/class/cla0b5.h
28867d69a6ceac83da53a11a5e1ef87d9476f0be - src/common/sdk/nvidia/inc/class/clc57d.h
8b07d7aca050be883fdc0d6f4b19eac0b0b6c796 - src/common/sdk/nvidia/inc/class/clc673.h
c116d91177c6cbfb8c25e7f35bb49a8d5a51816c - src/common/sdk/nvidia/inc/class/cl008f.h
4fc2133935b8e560c9a1048bc0b1f1c2f0a4464c - src/common/sdk/nvidia/inc/class/cl00c1.h
5a6098f821e8faa19345313477726431f9271cde - src/common/sdk/nvidia/inc/class/clc661.h
6db83e33cb3432f34d4b55c3de222eaf793a90f0 - src/common/sdk/nvidia/inc/class/cl00b1.h
5b573deb4d68ccb67d9cccc11b28203c5db3d2f7 - src/common/sdk/nvidia/inc/ctrl/ctrl0002.h
88947927d79e15df8cbf77a59ac883a29e970413 - src/common/sdk/nvidia/inc/ctrl/ctrlc638.h
625af1df5c9453bd35a9e873ee5c77e73d5fd195 - src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h
ade4a731f59c7cd16b4a60d318a19147b9918bb9 - src/common/sdk/nvidia/inc/ctrl/ctrl0004.h
90843f8173a341deb7f1466cd69a17114c6b9e4f - src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
a305225ceda0a39c76ed61b819a1f4165f5644f5 - src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h
be3c9e2de8b8d33fe04389b224fa6ad95ecd089b - src/common/sdk/nvidia/inc/ctrl/ctrla06f.h
c3e3213f548f93592f7d3dfd76e63a2102d800ec - src/common/sdk/nvidia/inc/ctrl/ctrl0076.h
d7415e78725899f9d10fa2d5f03f3d62cef42f26 - src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h
9e343f73f46238075cef766cad499533559dfa28 - src/common/sdk/nvidia/inc/ctrl/ctrl00da.h
f7601ce8c7c2d7a1143bff5280e3e5d9b5c4c147 - src/common/sdk/nvidia/inc/ctrl/ctrl906f.h
97ac039e796faca6c9f78e16020fe96225b33492 - src/common/sdk/nvidia/inc/ctrl/ctrlc637.h
fe7ce28fe76174a6de68236b44ea565ba2ea687b - src/common/sdk/nvidia/inc/ctrl/ctrl00de.h
3ba6904c69aa7710c4561d5643b18fc41e141d4e - src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h
b178067ba5f93e7fafb4c2ee0f5032acf9bc55d7 - src/common/sdk/nvidia/inc/ctrl/ctrla081.h
58a5d3a55b2d9b29d4f1b1e7b5d4d02ae6885e30 - src/common/sdk/nvidia/inc/ctrl/ctrl003e.h
16a24249210637987d17af6069ae5168404743ee - src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h
58f8e48d5851cc10e3c5fd3655d7948b9f327ca0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080.h
b86c4d68c5758f9813f00cc562110c72ef602da7 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h
c042a366bc755def9e4132e2768c1675871dbe65 - src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
c8b2e0e64bb3cf3c562dee5fa7913035f82d8247 - src/common/sdk/nvidia/inc/ctrl/ctrl402c.h
352825959d98fe9b47a474cfdd154d380c80d24e - src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h
9d908bb15aecc9d8094e1b6c13301efba6032079 - src/common/sdk/nvidia/inc/ctrl/ctrl0080.h
3fcf5dbb82508d88a040981a7ab21eac1466bb2b - src/common/sdk/nvidia/inc/ctrl/ctrl0073.h
bfee287b190fd698735c5660592741ba5c25a8ea - src/common/sdk/nvidia/inc/ctrl/ctrl0020.h
2e65ccd2704919780a152c69f53400a0dc5e6e41 - src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h
4fb7753f3502303314d9e8f853ee3b752f7e9317 - src/common/sdk/nvidia/inc/ctrl/ctrl0100.h
8764e07e9d348163db4eb41b0c3cf32c76172c0d - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h
5782a19aeaf9695c13940cf4532e41523a8460e3 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h
f21c15122509a8843e676a2bd5e799c58cd96379 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h
326b61039197db58d8369256f6d7dc9764aea421 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h
e7452921bdbd036ca3a37c60c49829c05e95c2d5 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h
5f3b68d39f14137d33f239408a6a13543f4ac966 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h
d08ef822e97ee56984618d52ed3ed55ee395eadb - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h
8fcc64b22b0f6cde40d5ecd23e5e2444277a5999 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h
70d65d4f923ec0efd8931433ae50930d12f78a07 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h
a33a1c1173962183793d84276e46c61d27ca867e - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h
1b594c39d1439c3d1ecc24c4325b2ea8c2724548 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h
0146d2b3ecec8760e76dacd8ce6bb75c343c6cac - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h
11abea0cdf485863196de56169451980ee6c016a - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h
4f0ccb0667bd3e3070e40f3f83bede7849bc78e4 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h
08dda80bac8d3418ad08e291012cf315dc9e5805 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h
28b06c8f8152dce2b2e684a4ba84acd25a8b8c26 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h
add9e3867e3dbd2c11bed36604680af4aaa0f164 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h
2ffb93d092df65570b074ad97f0bb436a1c66dff - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
79fd7ed84cb238ea90ea3691f40ea7140034d3dc - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h
2ea79d79223b06633fb7f541ebbe5a300ba3885f - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h
44c9aa512eb0b9b92cace9e674299f2a9227c37c - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h
a3328cf6633f9b04258eff05ce30e66cc6930310 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h
a427892e601a4ca4f88cc5778ff78895324f3728 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h
92ff82d1045933baa79958a9f6efd451b0123e95 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h
7ef9e10955708592e92e127eb3fb372adff44818 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h
3c1bd0db339456c335acd50a75ace42cb8bbe6f8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
be10e3f4a9dd2f2ab35305ee0af628ef339b25a7 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
db66195c8e7252c5f424953275cbb7be90a17ba8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h
c74ac448c3382d92e662804b56e73edd748e2678 - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h
7318f74523bb6a015e561dba1a06b47a278d856d - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h
702d9cb471a344a25911449cc580f69f7155ab1c - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h
3f747a4fc98291329e0245a971248cf2c28a1b60 - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h
19c7eff334c591c803dcd93fc0818798c281df48 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h
c7dcbc0ae7454df6523c6deb5f07a70dc2fdbc15 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h
882b13d54585a6fc5534d12b9cdcec29c8cde337 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fucodecoverage.h
76fb63a6782ff1236303fdd7bf2698f42965a266 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h
00d2655f569187190bd117bdf37fe4ddd5e92320 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h
8064c31eb1e447561c415f9835aecac97d5f3517 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h
713aa1291aef3f79304ad35c5143a7576f242f63 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
bb7955387f6a286927e7922019676ca0aba713e6 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h
35367f08b96510a5312653b5197d6bb34c0a3d00 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h
a0cf9dfb520e3320cd9c154c01cd2f1a7bbbd864 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
c2066c407f81538047c435fffca2705c28107663 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
d727b328e995a7d969ec036f2d5b52264568a7bf - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
52f251090780737f14eb993150f3ae73be303921 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h
77eb4fab61225663a3f49b868c983d5d532ca184 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h
6ca26c7149455e43f32e8b83b74f4a34a24a2d29 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h
134d43961ea1d42fc36d75685fdd7944f92b0b53 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
022feef64678b2f71ab70dc67d5d604054990957 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h
2a00952f0f3988c5425fec957a19d926ae75ba28 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h
79b38bbe679d397b48b78266aa5f50459fe5b5bc - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h
514d012dbfd9e056b7f729bccb213fa9193d433e - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h
6ef99465758f71f420ac17765380cc37dbcac68a - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h
5f70c2eb6a144bc4d7ca8be63fa46391909e8201 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h
f4ed3ccff4720114d1aaed82484ed70cf07626db - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h
ba3b73356bf0d1409ecfd963b623c50ec83f1813 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h
bb0a5ff091ef854b19e7da0043b7b7b10232c3de - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h
1f25c9f215991f34fee94dafac5fad0e7460db1c - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
ddeb0df51d5f662948f9098a5d85b40c8ab6504b - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h
e3fb93f0ff3469ec76cecdc6f0bf1c296551a2b1 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h
a138379dd76c468072f1862b8fc6ae79ee876b4e - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h
ee99443c1bd3441df474566622486b04c4502ac0 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h
44e1b06211eee31e42e81879f5220f26ddec70ae - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
ff789d585a7f001b8bd32e07a268c635d39b17ab - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h
03f54e22b39ad5cf682eada7147c6c155f16b385 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h
e8d883de767aa995a374d8da56b5c9da8787cb1d - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h
8fdb493bda6119025c1d00f289a6394e7dcd1b53 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h
cfa32c37f373eeef53aedc3f4dffff1634c122e8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h
41a0a14e04527fa2c349d2895bb41affd154c999 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h
ecd312fabb249a25655e151cee3615c5ab61ffa7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h
c30b5995d353e68623b32fea398f461351e3b8f1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h
aa0f685b94bdae99a58aa1a45735b0593a2e6f5a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h
aa86ffd04a55436ecacbedb1626f6187bbddedf7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h
3423a69bba50e1405b5a7d631bfff1f6f0a1673f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h
1990d0c4fa84c6d078282d4d7d0624ccb0325ce7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h
146263409e5304f661da349b56761ab7403144bd - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
8b622186edb156e980d02bd59a71c01923d1aa23 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h
70dc706ea4ee7b143a716aae9e4f8c0bcef6c249 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h
0a156fc54f45386fabd06ef5ec11ba3a816fbfb7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h
c157e185d3c64ee9476ddc75881bfc5a5b8b997f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h
785d96360f86bc53eb428fd3f4fbeda395400c8a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
b8e8c5ccab01d7997d1fd5579a690cb3279a8ab3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h
b2eecbca32d87b939858bf0b22f93c06b49b3a04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h
24a891a02e1a882769d4da3454e4dfcf42b1ea6c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h
6969b092708d57f88b0f0fdbb3464c786f90710c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h
013bd8d50841ea314f5ea2bd507470f2c3aff831 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
d63388ff48ca055c82bcd6148506eacd0e26b4dc - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h
96f72ec608cd198be995f3acd9c04afe7c7e6dc8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h
359c6b06f2712a527d1ef08465179c14a8b4a751 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h
4c2af959d06536294d62b2366a6ba61ca744bd50 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h
d15e8e86ca66b3a69a774e322dfdd349b9f978b9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h
898fa08818b657c27b456d952e7a4e09d8d197ee - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h
9933e90ad92eb7df2f64dcc30dcd680d5f7c530d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
18d1a44b7113c1707bbf5c65fb1be790304c0bed - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
0cd5e883dfafb74ce2ec9bccca6e688a27e6cfa9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h
07f82ae90cde3c6e2e6c5af135c40e01660c39a3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h
c8f1c115d78bab309c0a887324b0dabfb8f9ea2d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h
ecceb8f7382c8f55c6ccd0330e14ccbc49fcd09c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h
2577a1d505a3d682e223fbcbc6d4c7d13162749d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h
d3969094e68f9d584ba9c6fb5457801caff6ccc1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h
74f1abf45a2a0f60c82e4825b9abfa6c57cab648 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h
115f683e5926ae130de87e4cea805ef6915ed728 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h
d4ba227a522423503e5044c774dbcca692c48247 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h
97bb79e74b25134fa02a60d310b3e81170df6fd6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h
baeb07c8bdadf835db754452f63d40956bc6a199 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
338c7de5d574fe91cda1372c5221e754d4c4b717 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h
4e4a4f9e94f2d7748064949f4b16845829670bf6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h
5ac6c9a299256935259eaf94323ae58995a97ad7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h
e4441458a7914414a2092f36a9f93389ed65154a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h
b55e4cf81b6112868eb6f6cd9c1a3b32f8fcda49 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
302f79771fcdba3122cf61affb53e0a3a3a27e6d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
5c7b955ef5e6f6ca9c0944e8a2b2c4a1ae760e04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h
93a9fa93eb3d1099991e4682b6228124220ca293 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
7f1af5b788616bab285a73bab5098fb6d134b159 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h
51dbd71f1cd5a66dd7a5b0fbb753713d27ff937c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h
cf1757ff453132fb64be0dec6c50eb935db29784 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink_common.h
59254e4bdc475b70cfd0b445ef496f27c20faab0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h
119432bbce99e91484a2bac79ca5257a36a7f98b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h
7f15697ca8645f77352f88c2a84713f348e98a24 - src/common/unix/nvidia-3d/include/nvidia-3d-vertex-arrays.h
220ac9628fe5afa0191b8c20304402baf0f70353 - src/common/unix/nvidia-3d/include/nvidia-3d-fermi.h
23478354284aa1be69bc70fa4157aa408177829c - src/common/unix/nvidia-3d/include/nvidia-3d-volta.h
75859a11c0fae125a0619c47ead964416ac8d6ed - src/common/unix/nvidia-3d/include/nvidia-3d-pascal.h
e621c127011311e8f97c8784d8539751a820bf47 - src/common/unix/nvidia-3d/include/nvidia-3d-maxwell.h
07fc2cd8495309f1218b9ddee4a4809b6dcb65a3 - src/common/unix/nvidia-3d/include/nvidia-3d-types-priv.h
1276b525f23b582e029c2ddc9ed0115f8e9dafb4 - src/common/unix/nvidia-3d/include/nvidia-3d-hopper.h
5030b264e17b70df0c99bc9da4350bdb48f2f60a - src/common/unix/nvidia-3d/include/nvidia-3d-kepler.h
146b4f305bfe710622a878fe3e9afd4f834124b8 - src/common/unix/nvidia-3d/include/nvidia-3d-turing.h
61f0a408812c04a59fb8f12713ce34d2ed544fe3 - src/common/unix/nvidia-3d/include/nvidia-3d-surface.h
e7a4acaef431a49ca7efd6bf72b6e8b57fafbab0 - src/common/unix/nvidia-3d/include/nv_xz_mem_hooks.h
40a9c57cca5b2f8acfe3ead472dcf0adc9423050 - src/common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c
af1a4d99bd19b72de120ba2046f35b95650985b1 - src/common/unix/nvidia-3d/src/nvidia-3d-volta.c
f78f737f1dfb52cf248543cced017a8fbad7b270 - src/common/unix/nvidia-3d/src/nvidia-3d-surface.c
4ea7a2a6811239760a1b56833fb07dbf8a99a10e - src/common/unix/nvidia-3d/src/nvidia-3d-hopper.c
e43e6ce6b9781d44b68868703fdbb779fc95f5d4 - src/common/unix/nvidia-3d/src/nvidia-3d-kepler.c
09fa5fbae25e08c819277566d7281f17305863f8 - src/common/unix/nvidia-3d/src/nvidia-3d-turing.c
e0ef9ab77cfdf207c800a9c067739add28632047 - src/common/unix/nvidia-3d/src/nvidia-3d-pascal.c
57f19f6aa7b896794aafacd978b2469d976f6f78 - src/common/unix/nvidia-3d/src/nvidia-3d-maxwell.c
08c29625af227debb72dd703630a754ac4fbeee0 - src/common/unix/nvidia-3d/src/nvidia-3d-core.c
7ca41841cc54bd597f5c10cc346b8f574b1c2acf - src/common/unix/nvidia-3d/src/nvidia-3d-fermi.c
d0331b7ebba0537af50bdf5815d9c048cbeb3388 - src/common/unix/nvidia-3d/src/nvidia-3d-init.c
569a662ce5f79dc450f44eeb7a0ff36580ba27fe - src/common/unix/nvidia-3d/interface/nvidia-3d-types.h
a06524af04de90562b08b6b26783232cf7ff01d4 - src/common/unix/nvidia-3d/interface/nvidia-3d-utils.h
3e97ecc773087c0c7f370faf0a9ff838793c9bd6 - src/common/unix/nvidia-3d/interface/nvidia-3d-color-targets.h
2d91e6f3ad425d3ca95de79ecb929b22cac57f52 - src/common/unix/nvidia-3d/interface/nvidia-3d-shaders.h
fd454a2318e970e6b1cb4a4b7b5633e4cb2e8b45 - src/common/unix/nvidia-3d/interface/nvidia-3d.h
34daeec12bbf45f0f85406afc56414da45afc2e6 - src/common/unix/nvidia-3d/interface/nvidia-3d-shader-constants.h
727210acfe72963aa6dddf1bcee91dc122897113 - src/common/unix/nvidia-3d/interface/nvidia-3d-constant-buffers.h
069b576dc1f03143999512cd03fc48fe18ed6706 - src/common/unix/nvidia-3d/interface/nvidia-3d-imports.h
2476f128437c0520204e13a4ddd2239ff3f40c21 - src/common/unix/common/inc/nv-float.h
881cbcc7ed39ea9198279136205dbe40142be35e - src/common/unix/common/inc/nv_assert.h
cb7c13757ca480e10b4ef3e3851d82ad5ccca3f1 - src/common/unix/common/inc/nv_mode_timings.h
d5253e7e4abd3ad8d72375260aa80037adcd8973 - src/common/unix/common/inc/nv_dpy_id.h
3e64a8fe60bb1266a769be8a5c0716e10c816b38 - src/common/unix/common/inc/nv_amodel_enum.h
995d8447f8539bd736cc09d62983ae8ebc7e3436 - src/common/unix/common/inc/nv_common_utils.h
edded9ca3d455444372fe6c497b2d61bd0cc3f96 - src/common/unix/common/utils/nv_memory_tracker.c
7bccb5a3dea9208f0fbd86d36efc369f215d5c3c - src/common/unix/common/utils/unix_rm_handle.c
26f2a36442266c5d2664d509ecfd31094a83e152 - src/common/unix/common/utils/nv_vasprintf.c
e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - src/common/unix/common/utils/nv_mode_timings_utils.c
667b361db93e35d12d979c47e4d7a68be9aa93b6 - src/common/unix/common/utils/interface/nv_mode_timings_utils.h
07c675d22c4f0f4be6647b65b6487e2d6927c347 - src/common/unix/common/utils/interface/nv_memory_tracker.h
8d9c4d69394b23d689a4aa6727eb3da1d383765a - src/common/unix/common/utils/interface/unix_rm_handle.h
9e008270f277e243f9167ab50401602378a2a6e8 - src/common/unix/common/utils/interface/nv_vasprintf.h
673bbd33569f55a900b5388a77d19edd3822ecf3 - src/common/unix/xzminidec/src/xz_dec_stream.c
9c67bdcbea04fbe1a5b2746549e502cdc368b54e - src/common/unix/xzminidec/src/xz_config.h
f2cfbcf1e2cb1d7545b5de609a4e7672bf8ae976 - src/common/unix/xzminidec/src/xz_dec_bcj.c
93af3bcdf863afa9655107c86f49aefdf9c05d90 - src/common/unix/xzminidec/src/xz_lzma2.h
fba46fe8f4a160d71a708578a85ab6731e4e024f - src/common/unix/xzminidec/src/xz_crc32.c
0ce26be0fb63a7ae52e2bb15a1770c80b9a5ac84 - src/common/unix/xzminidec/src/xz_stream.h
8365ec8d875fad74507d49228ad8959c66bbc360 - src/common/unix/xzminidec/src/xz_dec_lzma2.c
2ade48b4c53fc3bebf1587bc0a1a08b26cd5981d - src/common/unix/xzminidec/src/xz_private.h
c2a87873eeff2a8010bb8a2cb8d1df28a20a0097 - src/common/unix/xzminidec/interface/xz.h
4498dc65d71b2b8635b365550e5e521da14c8e6b - src/common/unix/nvidia-push/include/nvidia-push-priv.h
4847b168b4f5e78dbb92cfec80734789a9131b87 - src/common/unix/nvidia-push/include/nvidia-push-priv-imports.h
616dd99d8dda5dbe35032a5fc558ff48f7cc1620 - src/common/unix/nvidia-push/src/nvidia-push-init.c
0916485ec1ff275771d88a725dcbf586663dbc33 - src/common/unix/nvidia-push/src/nvidia-push.c
548f9e591d2c851b157575e1b83e25eb47bc61e6 - src/common/unix/nvidia-push/interface/nvidia-push-methods.h
5f5013bdbda9582252db2e92a105a57f24ca7d96 - src/common/unix/nvidia-push/interface/nvidia-push-init.h
f3576444d1dbcc4e9379bee6151ef8c7a382e276 - src/common/unix/nvidia-push/interface/nvidia-push-utils.h
918c4f2e2edd0a52c7085f758286dacd21b5b4c5 - src/common/unix/nvidia-push/interface/nvidia-push-types.h
b54add7dea08ff736ac27ee259f6ccb389c01f09 - src/common/unix/nvidia-headsurface/nvidia-headsurface-types.h
5d014581148b38eede1d31a1f48e388cf6eb7a45 - src/common/unix/nvidia-headsurface/nvidia-headsurface-constants.h
e1fbb040ea9d3c773ed07deb9ef5d63c8c8cab7a - src/common/inc/nvSha1.h
8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - src/common/inc/rmosxfac.h
bcad75550591ede46152403e40413f87e85b0a80 - src/common/inc/nvlog_defs.h
ebccc5c2af2863509e957fe98b01d9a14d8b0367 - src/common/inc/nv_list.h
0e970acfcadddd89fae91c812647fecb80c98d52 - src/common/inc/pex.h
73e2133709eb920a92fcebf7aaab958020493183 - src/common/inc/nvctassert.h
6fa5359ffe91b624548c226b6139f241771a9289 - src/common/inc/jt.h
489ce9f046d9c2ff95a1284ab5e04b5843b874ae - src/common/inc/nvVer.h
7ab322addb3e1ba880cee07dc0d26d882db097b0 - src/common/inc/nvCpuIntrinsics.h
d9c0905f374db0b9cc164ce42eab457d1ba28c53 - src/common/inc/nvop.h
d70c17a0693c8b5dbf7c83f693eec352ce22917c - src/common/inc/nv_smg.h
b4c5d759f035b540648117b1bff6b1701476a398 - src/common/inc/nvCpuUuid.h
4282574b39d1bcaf394b63aca8769bb52462b89b - src/common/inc/nvBinSegment.h
8c41b32c479f0de04df38798c56fd180514736fc - src/common/inc/nvBldVer.h
62e510fa46465f69e9c55fabf1c8124bee3091c4 - src/common/inc/nvHdmiFrlCommon.h
82aadec9509f41eab58727c3498dc24a30a0128e - src/common/inc/nvrmcontext.h
d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - src/common/inc/nv_mig_types.h
a346380cebac17412b4efc0aef2fad27c33b8fb5 - src/common/inc/nvlog_inc2.h
e670ffdd499c13e5025aceae5541426ab2ab0925 - src/common/inc/gps.h
963aebc9ec7bcb9c445eee419f72289b21680cdd - src/common/inc/hdmi_spec.h
987027bed503d8ce5ad01706aae4a16ee37f3e2d - src/common/inc/nvSemaphoreCommon.h
5257e84f2048b01258c78cec70987f158f6b0c44 - src/common/inc/nvlog_inc.h
4a88a536b71995db70e3a83a48d47072693ec69d - src/common/inc/nv_speculation_barrier.h
2408132586b69e580ff909f7f66451aa2882abff - src/common/inc/nvPNPVendorIds.h
4f7ca8fb43d6885cf60869ed241476032f20f5f3 - src/common/inc/nvUnixVersion.h
23edf9cce2608c494dad045b9466b8f3a18bab56 - src/common/inc/displayport/dpcd20.h
ecc26f6fae35818791733c1a56ea1b556bba7f4f - src/common/inc/displayport/displayport2x.h
aad6f14dacdb166a8d884cae6c5f382d98e5c46c - src/common/inc/displayport/dpcd14.h
27572a26d0a0a32f38606323ea6da65096bac039 - src/common/inc/displayport/displayport.h
8f7c9c19a76eca84fc2556841042c2f1c3d07a1a - src/common/inc/displayport/dpcd.h
4ee8a4d2a0fe12d348ac4c1a1e0a22bd272e146d - src/common/inc/swref/common_def_nvlink.h
e182f9538fea08b5d25f3e74083a7a12a7d49809 - src/common/inc/swref/published/nv_ref.h
641e9803749cbeeca1149c43fe2da5e6edf25137 - src/common/inc/swref/published/nv_arch.h
059493ce7d5390b7e859a19d1a24752df8126ace - src/common/inc/swref/published/turing/tu102/kind_macros.h
86a59440492fd6f869aef3509f0e64a492b4550d - src/common/inc/swref/published/turing/tu102/dev_mmu.h
38589617aab40efdd86b401a18d1e28b5d3b9f8e - src/common/inc/swref/published/disp/v05_02/dev_disp.h
1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - src/common/inc/swref/published/disp/v04_02/dev_disp.h
c01e4a95ede641ff5a9e6918b39db4d2099c91cb - src/common/inc/swref/published/disp/v05_01/dev_disp.h
04345c77f8c7a8b4825f0cb7fc96ca7c876af51c - src/common/inc/swref/published/disp/v04_01/dev_disp.h
1604a3fa3e3142118c82a1dc621cdac81806195a - src/common/inc/swref/published/disp/v03_00/dev_disp.h
c4f12d6055573a19f9211fdddd3778575e2a17fd - src/common/inc/swref/published/disp/v02_04/dev_disp.h
64c123c90018c5ee122b02b02cbccfcd5ec32cab - src/common/inc/swref/published/t23x/t234/dev_fuse.h
b5ce995e9e5afcd73d39642e31998e087ea133e8 - src/common/shared/nvstatus/nvstatus.c
08816a33e698308c76f3a026c29d0dcb41c5ee20 - src/common/shared/inc/compat.h
9231ac111286772170925e8f6cf92bde5914abb8 - src/common/shared/inc/nvdevid.h
750ecc85242882a9e428d5a5cf1a64f418d59c5f - src/common/displayport/inc/dp_object.h
a6ff1a7aee138f6771c5b0bbedb593a2641e1114 - src/common/displayport/inc/dp_messages.h
80380945c76c58648756446435d615f74630f2da - src/common/displayport/inc/dp_timeout.h
cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - src/common/displayport/inc/dp_merger.h
070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - src/common/displayport/inc/dp_buffer.h
02b65d96a7a345eaa87042faf6dd94052235009c - src/common/displayport/inc/dp_messageheader.h
78595e6262d5ab0e6232392dc0852feaf83c7585 - src/common/displayport/inc/dp_auxbus.h
e27519c72e533a69f7433638a1d292fb9df8772e - src/common/displayport/inc/dp_crc.h
b2db6b37515f7c979e18686694546b9fa5145459 - src/common/displayport/inc/dp_hostimp.h
29ee5f4ef6670f06e96c07b36c11e3bad8bee6aa - src/common/displayport/inc/dp_address.h
575f4f97189ad6b4944bdd4127cdbee79d8c688d - src/common/displayport/inc/dp_groupimpl.h
cf09c061fa898cd84edd34a9457726abc501b03c - src/common/displayport/inc/dp_configcaps.h
afa1135330de2ce8f1a6d20e99b54f507b5adbbd - src/common/displayport/inc/dp_evoadapter.h
01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - src/common/displayport/inc/dp_guid.h
cca426d571c6b01f7953180e2e550e55c629f0f4 - src/common/displayport/inc/dp_auxretry.h
a086546bf92d7e5e9adf66dcac012b3dc81c2597 - src/common/displayport/inc/dp_internal.h
f6e1b0850f5ed0f23f263d4104523d9290bb8669 - src/common/displayport/inc/dp_vrr.h
2f134665b274bb223c3f74e0ec5c6a0392fa6387 - src/common/displayport/inc/dp_discovery.h
07d22f84e6a386dad251761278a828dab64b6dd5 - src/common/displayport/inc/dp_bitstream.h
f09aae8321de23e0a48072d0e082aecb84a3ebbe - src/common/displayport/inc/dp_mainlink.h
cae50568f7bef4a2a69c4d718a5297b9ae15da3f - src/common/displayport/inc/dp_deviceimpl.h
eb9cdbb0a907926b1afd2a551ec19830f06ae205 - src/common/displayport/inc/dp_splitter.h
5bd3706ceea585df76a75dda7f9581b91ee8f998 - src/common/displayport/inc/dp_tracing.h
4a098c4d09dedc33b86748d5fe9a30d097675e9f - src/common/displayport/inc/dp_list.h
6c87ce702f215b21c1ab0064a2a85b3eda96ecec - src/common/displayport/inc/dp_edid.h
be558902391fb6cb5085652b560391b54befca4b - src/common/displayport/inc/dp_printf.h
379d3933c90eaf9c35a0bad2bd6af960a321465f - src/common/displayport/inc/dp_wardatabase.h
2016714a04d46ac8412ef55d2156d86ba4d594eb - src/common/displayport/inc/dp_auxdefs.h
e2075486b392d6b231f2f133922ac096ca4bc095 - src/common/displayport/inc/dp_ringbuffer.h
09c80a469f1e7e0edd6381578d66fd0e789bc0db - src/common/displayport/inc/dp_regkeydatabase.h
7622cb576c2ebbfe65c0f6132d8561ab1815f668 - src/common/displayport/inc/dp_qse.h
dd420c9e7c271d8bea047d431667524105473e95 - src/common/displayport/inc/dp_linkconfig.h
e02e5621eaea52a2266a86dcd587f4714680caf4 - src/common/displayport/inc/dp_linkedlist.h
430f42522a1e60f2420aa2e4e471aa20945d0253 - src/common/displayport/inc/dp_timer.h
0f71b80d0a0d53fc6581ef341a4e637a467a3795 - src/common/displayport/inc/dp_connectorimpl.h
c8c55dfc7b085b421b01bd9dc7b74abe6f9a0932 - src/common/displayport/inc/dp_connector.h
78ef30b2caf2cf4ff441b5613a796b93ae8973bd - src/common/displayport/inc/dp_messagecodings.h
1363fca23628f312c4b6b0c868b8a43f4a8a5a24 - src/common/displayport/inc/dp_watermark.h
d2b00a849a81f6c6092e3b2c4e7ed20fcee62b39 - src/common/displayport/inc/dptestutil/dp_testmessage.h
70b155b0da07a92ede884a9cec715f67e6b5c3e8 - src/common/displayport/src/dp_list.cpp
107b170d4496a754f22819e66794bcdc51256b7c - src/common/displayport/src/dp_sst_edid.cpp
fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - src/common/displayport/src/dp_crc.cpp
2caf1cd4a99e55126883dbdd9f6b74883c71e171 - src/common/displayport/src/dp_messagecodings.cpp
ef3fefa8dd819d4086c054919b769ca18d058469 - src/common/displayport/src/dp_wardatabase.cpp
c49e37f3e225e60a74c71a2b571e542e12fd9bc9 - src/common/displayport/src/dp_watermark.cpp
e874ffeaeb6deec57605bf91eaa2af116a9762bd - src/common/displayport/src/dp_bitstream.cpp
d699ce22e5e2d641caa2fbacca3095d7dd7b3ffe - src/common/displayport/src/dp_evoadapter.cpp
5f2fb1683cff15175e3ef2276b721863886adc79 - src/common/displayport/src/dp_vrr.cpp
0717b87aafecbe2216e0f0b53ee088a980ef7ad4 - src/common/displayport/src/dp_auxretry.cpp
0670fb5302b1bd3fc65daa848f23e4086619b5e6 - src/common/displayport/src/dp_discovery.cpp
5c12759c27407e8df4c8f1f7bc6ec1595b6b1a63 - src/common/displayport/src/dp_messages.cpp
93ba2409667997fdbcb7af1a8f24ec4a0e15b62c - src/common/displayport/src/dp_timer.cpp
ffdd039884b1400eaf4d6d7cc81d0faba5282014 - src/common/displayport/src/dp_deviceimpl.cpp
c625716e5516a290ac501563e2a73eef9b4f7dd6 - src/common/displayport/src/dp_edid.cpp
af1672e8abb92d8d574d9605285753a8580c5d10 - src/common/displayport/src/dp_groupimpl.cpp
2cda981a5e36285ba4173573d074f8761e74f186 - src/common/displayport/src/dp_qse.cpp
5c7adbdfe295f7e1a1d4899a62bf95b456f84412 - src/common/displayport/src/dp_messageheader.cpp
d3c4c54f96cc02d37fab45521685426e5c38fb4d - src/common/displayport/src/dp_mst_edid.cpp
f56f92e32710b0342805b785d34ba1a9f2a54ed3 - src/common/displayport/src/dp_guid.cpp
eb7e47407bd04e871f891038cc08736d066ffaa9 - src/common/displayport/src/dp_connectorimpl.cpp
a62b774b7c45882b5854b91b600987c343c24966 - src/common/displayport/src/dp_linkconfig.cpp
0a8818da34b5321763c1f60cb8b6ea5e1a2837f1 - src/common/displayport/src/dp_splitter.cpp
24c0787ce5ec691c6b8edb351000265f47e0156a - src/common/displayport/src/dp_buffer.cpp
422a5d3426d5e1cc2346d9d5f86ccde66062ffdc - src/common/displayport/src/dp_merger.cpp
41589d1d5bfa4316d5d066a7201226baed5332db - src/common/displayport/src/dp_configcaps.cpp
a0b68fce10eb0b95518cfd291e2d282872225295 - src/common/displayport/src/dptestutil/dp_testmessage.cpp
f0a73cd173382d8abd4b0c70da8b32e144740bb5 - src/common/modeset/timing/nvt_dmt.c
15d7c508b621c877887962b2c27cdb6c7d1144a0 - src/common/modeset/timing/nvt_util.c
1341b987df8336c882e31d22d2141cadfb67272d - src/common/modeset/timing/nvtiming.h
f8faf3eabd24a1239e1d4faebdc40c0ffa713ff9 - src/common/modeset/timing/nvt_edid.c
c95a1c7914b0d1cba366f2a29e08eb93e0ad033d - src/common/modeset/timing/nvt_edidext_displayid.c
3d3a0889baed7a15c2adce54ba56c1dc783faffd - src/common/modeset/timing/dpsdp.h
ff92b05f8648cb4bc31c0f64707065bb56ff3eb3 - src/common/modeset/timing/nvt_dsc_pps.c
f75b1d98895bdccda0db2d8dd8feba53b88180c5 - src/common/modeset/timing/displayid.h
1997adbf2f6f5be7eb6c7a88e6660391a85d891b - src/common/modeset/timing/nvt_gtf.c
2737ed1d1eccd163f9cd12b1944f96a03c526b31 - src/common/modeset/timing/nvtiming_pvt.h
58b68f1272b069bb7819cbe86fd9e19d8acd0571 - src/common/modeset/timing/edid.h
6d221aad371436ba304448ba2cf04f89148a09bb - src/common/modeset/timing/nvt_edidext_displayid20.c
48761f63bc2794dfbde10492cc53137458cfcd0e - src/common/modeset/timing/nvt_dsc_pps.h
08ef97092899a3dc80251f61cedc73a851d70baa - src/common/modeset/timing/nvt_edidext_861.c
d7cb716eeae50ecfe44fb3c4c4476de598ab78d7 - src/common/modeset/timing/nvt_tv.c
080c1de64d099ecb1aeb9b0b2f176f7be2d609b5 - src/common/modeset/timing/displayid20.h
1c2e163802849848e9ae1586d38c4cd82494217f - src/common/modeset/timing/nvt_ovt.c
54aa88075d9ceb9c6ef99d9c15cb32751a33f8d0 - src/common/modeset/timing/nvt_cvt.c
e13cbe77f864afcddaccff7aeb1923cd02f1482f - src/common/modeset/timing/nvt_displayid20.c
f8911888bdd441666c03fe27381d7730b7dd9131 - src/common/modeset/hdmipacket/nvhdmipkt_internal.h
12118b508a757fd0a162d1e740d93685a67363ea - src/common/modeset/hdmipacket/nvhdmipkt.c
5b541b9ab6fe9333815a760d4043fef725b1c848 - src/common/modeset/hdmipacket/nvhdmipkt_C971.c
83d94f0a5eb7318d00d96115b0139f9f99052ddc - src/common/modeset/hdmipacket/nvhdmipkt_CC71.c
b390bf4f74d690068ff24dce90b79b227769ac2f - src/common/modeset/hdmipacket/nvhdmipkt_C671.c
206727972ab3a5f8a2cde0e153d63aef929b6c01 - src/common/modeset/hdmipacket/nvhdmipkt_0073.c
a71968671ce6b64e235de6902bebc2a06da7ae04 - src/common/modeset/hdmipacket/nvhdmipkt_9171.c
54a1b5e5aaf0848a72befc896ed12f1de433ad4f - src/common/modeset/hdmipacket/nvhdmipkt_9471.c
57dbf547549c6fe24eb51cc54185b321c263108f - src/common/modeset/hdmipacket/nvhdmipkt.h
9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - src/common/modeset/hdmipacket/nvhdmipkt_9571.c
559406ebdbd7f810f1ecbeb3e78b6518834b90fe - src/common/modeset/hdmipacket/nvhdmipkt_class.h
e1df3885cd76f5159801c1f66f20b18537eaecf3 - src/common/modeset/hdmipacket/nvhdmipkt_C871.c
5e12a290fc91202e4ba9e823b6d8457594ed72d3 - src/common/modeset/hdmipacket/nvhdmi_frlInterface.h
67db549636b67a32d646fb7fc6c8db2f13689ecc - src/common/modeset/hdmipacket/nvhdmipkt_9271.c
e6d500269128cbd93790fe68fbcad5ba45c2ba7d - src/common/modeset/hdmipacket/nvhdmipkt_C371.c
764d216e9941d0dcf41e89b2a0ddd8acf55902c8 - src/common/modeset/hdmipacket/nvhdmipkt_common.h
b882497ae393bf66a728dae395b64ac53602a1a5 - src/common/softfloat/nvidia/nv-softfloat.h
be9407a273620c0ba619b53ed72d59d52620c3e4 - src/common/softfloat/nvidia/platform.h
f6d98979ab2d1e2b0d664333104130af6abbcad5 - src/common/softfloat/source/f64_to_i64_r_minMag.c
21a6232d93734b01692689258a3fdfbbf4ff089d - src/common/softfloat/source/s_roundToUI32.c
29321080baa7eab86947ac825561fdcff54a0e43 - src/common/softfloat/source/i32_to_f32.c
dafa667ee5dd52c97fc0c3b7144f6b619406c225 - src/common/softfloat/source/s_mulAddF64.c
108eec2abf1cddb397ce9f652465c2e52f7c143b - src/common/softfloat/source/f64_roundToInt.c
513a7d1c3053fc119efcd8ae1bcc9652edc45315 - src/common/softfloat/source/f32_lt.c
d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - src/common/softfloat/source/f32_to_i64_r_minMag.c
2db07bbb8242bc55a24ef483af6d648db0660de0 - src/common/softfloat/source/f32_add.c
c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - src/common/softfloat/source/s_subMagsF64.c
5c1026617c588bcf5f1e59230bd5bb900600b9ac - src/common/softfloat/source/f64_mul.c
5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - src/common/softfloat/source/f64_to_ui64_r_minMag.c
6fa7493285fe2f7fdc0ac056a6367e90327905c2 - src/common/softfloat/source/f32_sub.c
da3b3f94a817909a3dc93ca5fa7675805c7979e0 - src/common/softfloat/source/f64_isSignalingNaN.c
d701741d8d6a92bb890e53deda1b795f5787f465 - src/common/softfloat/source/f64_le.c
baa7af4eea226140c26ffe6ab02a863d07f729fb - src/common/softfloat/source/f64_eq_signaling.c
2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - src/common/softfloat/source/f32_to_ui64.c
054b23a974fc8d0bab232be433c4e516e6c1250a - src/common/softfloat/source/f64_lt_quiet.c
dde685423af544e5359efdb51b4bf9457c67fa3b - src/common/softfloat/source/f32_sqrt.c
fb062ecbe62a1f5878fd47f0c61490f2bde279dd - src/common/softfloat/source/s_roundToI32.c
8e58f0258218475616ff4e6317516d40ad475626 - src/common/softfloat/source/f32_lt_quiet.c
ab19c6b50c40b8089cb915226d4553d1aa902b0e - src/common/softfloat/source/f64_to_i32_r_minMag.c
86fdc2472526375539216461732d1db6a9f85b55 - src/common/softfloat/source/s_roundPackToF32.c
9266c83f3e50093cc45d7be6ab993a0e72af1685 - src/common/softfloat/source/s_roundPackToF64.c
2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - src/common/softfloat/source/ui64_to_f32.c
68843a93e1f46195243ef1164f611b759cf19d17 - src/common/softfloat/source/f32_le_quiet.c
00ab2120f71117161d4f6daaa9b90a3036a99841 - src/common/softfloat/source/f32_to_ui32.c
d0f8f08c225b60d88b6358d344404ba9df3038ec - src/common/softfloat/source/s_normSubnormalF32Sig.c
0108fe6f0d394ad72083aff9bb58507f97a0b669 - src/common/softfloat/source/ui32_to_f64.c
7bc81f5bc894118c08bfd52b59e010bc068ed762 - src/common/softfloat/source/ui32_to_f32.c
0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - src/common/softfloat/source/s_roundToI64.c
c3ce12c227d25bc0de48fbcf914fc208e2448741 - src/common/softfloat/source/f64_sub.c
b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - src/common/softfloat/source/s_roundToUI64.c
29396b7c23941024a59d5ea06698d2fbc7e1a6ca - src/common/softfloat/source/f64_to_i64.c
ae25eea499b3ea5bdd96c905fd0542da11083048 - src/common/softfloat/source/s_normRoundPackToF64.c
b22876b0695f58ee56143c9f461f1dde32fefbf3 - src/common/softfloat/source/f64_to_ui64.c
b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - src/common/softfloat/source/f32_le.c
0126e0fceb1fa7912f4d5b8c3a6ebb4a048eb98a - src/common/softfloat/source/f16_to_f32.c
1ff879eca2a273293b5cd6048419b2d2d8063b93 - src/common/softfloat/source/f64_mulAdd.c
0e9694d551848d88531f5461a9b3b91611652e9a - src/common/softfloat/source/f64_to_ui32_r_minMag.c
5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - src/common/softfloat/source/f32_isSignalingNaN.c
bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - src/common/softfloat/source/f64_to_f32.c
1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - src/common/softfloat/source/f64_rem.c
50daf9186bc5d0180d1453c957164b136d5ffc89 - src/common/softfloat/source/f64_eq.c
09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - src/common/softfloat/source/s_addMagsF32.c
9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - src/common/softfloat/source/i64_to_f64.c
fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - src/common/softfloat/source/s_shiftRightJam128.c
aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - src/common/softfloat/source/f64_le_quiet.c
38bd00e9c4d2f1354c611404cca6209a6c417669 - src/common/softfloat/source/s_countLeadingZeros64.c
d9a86343e6cc75714f65f690082dd4b0ba724be9 - src/common/softfloat/source/s_roundPackToF16.c
0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - src/common/softfloat/source/f32_eq.c
d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - src/common/softfloat/source/f32_to_i32.c
296c40b0589536cb9af3231ad3dcd7f2baaa6887 - src/common/softfloat/source/f64_lt.c
0d8e42636a3409a647291fdb388001c2b11bba07 - src/common/softfloat/source/f32_to_f16.c
9a60700ce25578100d83d529e49f08f71cf35e17 - src/common/softfloat/source/s_normSubnormalF16Sig.c
ec1a797b11f6e846928a4a49a8756f288bda1dfa - src/common/softfloat/source/i32_to_f64.c
729e790328168c64d65a1355e990274c249bbb3a - src/common/softfloat/source/f32_to_i32_r_minMag.c
9a5b93459ace2da23964da98617d6b18006fab86 - src/common/softfloat/source/s_countLeadingZeros8.c
84b0a01ba2a667eb28b166d45bd91352ead83e69 - src/common/softfloat/source/i64_to_f32.c
4b37be398b3e73ae59245f03b2ba2394fc902b4d - src/common/softfloat/source/s_normSubnormalF64Sig.c
6f83fa864007e8227ae09bb36a7fdc18832d4445 - src/common/softfloat/source/f32_mul.c
daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - src/common/softfloat/source/f32_rem.c
a94c8c2bd74633027e52e96f41d24714d8081eb4 - src/common/softfloat/source/s_approxRecipSqrt_1Ks.c
69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - src/common/softfloat/source/f64_to_ui32.c
50b3147f8413f0595a4c3d6e6eeab84c1ffecada - src/common/softfloat/source/s_normRoundPackToF32.c
bbc70102b30f152a560eb98e7a1a4b11b9ede85e - src/common/softfloat/source/f64_sqrt.c
760fd7c257a1f915b61a1089b2acb143c18a082e - src/common/softfloat/source/s_addMagsF64.c
ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - src/common/softfloat/source/f32_mulAdd.c
4445b1fbbd507144f038fd939311ff95bc2cf5f1 - src/common/softfloat/source/ui64_to_f64.c
871cb1a4037d7b4e73cb20ad18390736eea7ae36 - src/common/softfloat/source/f32_to_ui64_r_minMag.c
ce37cdce572a3b02d42120e81c4969b39d1a67b6 - src/common/softfloat/source/f64_to_i32.c
c29536f617d71fe30accac44b2f1df61c98a97dc - src/common/softfloat/source/f64_div.c
54cbeb5872a86e822bda852ec15d3dcdad4511ce - src/common/softfloat/source/f64_add.c
e7890082ce426d88b4ec93893da32e306478c0d1 - src/common/softfloat/source/s_approxRecipSqrt32_1.c
824383b03952c611154bea0a862da2b9e2a43827 - src/common/softfloat/source/s_subMagsF32.c
00c612847b3bd227a006a4a2697df85866b80315 - src/common/softfloat/source/s_mulAddF32.c
7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - src/common/softfloat/source/softfloat_state.c
e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - src/common/softfloat/source/f32_to_f64.c
1484fc96d7731695bda674e99947280a86990997 - src/common/softfloat/source/f32_to_i64.c
2960704c290f29aae36b8fe006884d5c4abcabb4 - src/common/softfloat/source/f32_div.c
23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - src/common/softfloat/source/f32_to_ui32_r_minMag.c
fe06512577e642b09196d46430d038d027491e9f - src/common/softfloat/source/f32_eq_signaling.c
5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - src/common/softfloat/source/s_mul64To128.c
e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - src/common/softfloat/source/f32_roundToInt.c
d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c
a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c
8efb3f7cd3217b5cd25896b4bad058c72fe5b89a - src/common/softfloat/source/8086-SSE/specialize.h
3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c
d152bc457b655725185bdff42b36bb96d6e6715e - src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c
1dd1b424087d9c872684df0c1b4063b077992d5f - src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c
252c816378fddab616b1f2a61e9fedd549224483 - src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c
21a11759ed2afd746a47c4d78b67640c2d052165 - src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c
98a850359fe08a7e39212f89ce96014ba80910da - src/common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c
0cbae7a5abc336331d460cbd3640d2cda02af434 - src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c
4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - src/common/softfloat/source/include/softfloat_types.h
1ded4df85ff5fa904fa54c27d681265425be1658 - src/common/softfloat/source/include/primitiveTypes.h
5f589a4d48cc59a0e5762303df9ea4a06ca398da - src/common/softfloat/source/include/softfloat.h
9d8a025889f3ec0e1cca7c4b52308158e1f39226 - src/common/softfloat/source/include/primitives.h
f118cad66d3c8ee17a52cec97cd3dc7e7a1cf2bc - src/common/softfloat/source/include/internals.h
14045fa6330dc6ed20d35eac5b4c5909631bca90 - src/common/src/nv_smg.c
abccf0a8732b881d904d937287ced46edcde45ac - src/nvidia/Makefile
c5f16fdf43ca3d2845d120c219d1da11257072b0 - src/nvidia/nv-kernel.ld
dcf4427b83cce7737f2b784d410291bf7a9612dc - src/nvidia/arch/nvalloc/unix/include/nv-reg.h
4750735d6f3b334499c81d499a06a654a052713d - src/nvidia/arch/nvalloc/unix/include/nv-caps.h
3c61881e9730a8a1686e422358cdfff59616b670 - src/nvidia/arch/nvalloc/unix/include/nv_escape.h
7fc52a43b242a8a921c2707589fa07c8c44da11c - src/nvidia/arch/nvalloc/unix/include/nv.h
81592e5c17bebad04cd11d73672c859baa070329 - src/nvidia/arch/nvalloc/unix/include/nv-chardev-numbers.h
e69045379ed58dc0110d16d17eb39a6f600f0d1d - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h
d1b1a1bc1fa30c1a966e95447f7831a06340d2d0 - src/nvidia/arch/nvalloc/unix/include/nv-priv.h
7e0175a8006f06b1d5f5be078d851a4f01648b96 - src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h
2eb11e523a3ecba2dcd68f3146e1e666a44256ae - src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h
5f004c33f130e6c5cd275f9c85d46185e4e9b757 - src/nvidia/arch/nvalloc/unix/include/os_custom.h
499e72dad20bcc283ee307471f8539b315211da4 - src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h
824ffbe85c591c7423855bee7bf3193473ef2b70 - src/nvidia/arch/nvalloc/unix/include/osapi.h
669bd0c054b00a74e8996c18063fa9bbf5cd7690 - src/nvidia/arch/nvalloc/unix/include/os-interface.h
2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numa.h
b3ecb82f142a50bdc37eafaeb86d67f10fbcf73f - src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h
af45762b6eeae912cc2602acf7dc31d30775ade7 - src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h
107d1ecb8a128044260915ea259b1e64de3defea - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h
3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h
98a5a3bd7b94e69f4e7d2c3a1769583c17ef5b57 - src/nvidia/arch/nvalloc/unix/src/os.c
a659a503a6fcffdcacd2b76ae6b1f156b4b9216c - src/nvidia/arch/nvalloc/unix/src/osmemdesc.c
b5ae9b8d551a3e5489605c13686fb6cce4579598 - src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c
a17aae37486b325442e447489b64add3694ab8b0 - src/nvidia/arch/nvalloc/unix/src/osunix.c
b5b409625fde1b640e4e93276e35248f0fccfa4c - src/nvidia/arch/nvalloc/unix/src/gcc_helper.c
07f9c0995f1fbbba9eb819321996b57c1d2b86cd - src/nvidia/arch/nvalloc/unix/src/exports-stubs.c
d8815125dbf79831b8fe55367bba60e7115243cc - src/nvidia/arch/nvalloc/unix/src/osinit.c
ef270b45ff3d72db9b319408c8bb060303e589f5 - src/nvidia/arch/nvalloc/unix/src/osapi.c
a7383deea9dcab093323d8dde1ede73f85f93343 - src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c
b1a6d0a1ca4307b8e8d9cf136c94ef7c9efbae4c - src/nvidia/arch/nvalloc/unix/src/registry.c
915ee6dbffff92a86d68ac38549b25aa1e146872 - src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c
ffea38efca6a43af9bc61bb6cb8c2b14c3d6fc20 - src/nvidia/arch/nvalloc/unix/src/escape.c
d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h
8530e3d1db60647a9132e10c2119a75295f18060 - src/nvidia/arch/nvalloc/common/inc/nv-firmware.h
1cd024cc06bba6f7c3663ca2d03fe25bd77761d3 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.c
0be1c1ff5f200a9aa68cdf3d03bc4780e757a1ea - src/nvidia/generated/g_traceable_nvoc.h
998d18bc2f6e2cdd00cf383000b66be8e8778baa - src/nvidia/generated/g_nv_debug_dump_nvoc.h
4491368ac52cfda834bdd24df3b6f156c32ec3a9 - src/nvidia/generated/g_client_nvoc.c
4eb2331b2f9f8d8c01d62ad771702e9b42f22b65 - src/nvidia/generated/g_lock_stress_nvoc.h
6b5bf7b2f5dd000bfa2949e14642dd582ba4a378 - src/nvidia/generated/g_event_buffer_nvoc.h
cd5f4b0bc23710e5b6277ff214a62c4993e95581 - src/nvidia/generated/g_code_coverage_mgr_nvoc.c
b9903d23010ea9d63117c27d5fe0cfba09849fa4 - src/nvidia/generated/g_context_dma_nvoc.c
4b7aaad308f2f25b07d932fc0fe0c3327db522a9 - src/nvidia/generated/g_objtmr_nvoc.h
7bd355d08dc6f2509db22ed56f1c05ab97f5f620 - src/nvidia/generated/g_allclasses.h
4eea9bd7952613f08af07508e2e9c1c0344940e7 - src/nvidia/generated/g_gpu_mgr_nvoc.h
c5cad88aa7de5a04a3b6f9836f355347448d6a7b - src/nvidia/generated/g_rmconfig_util.h
db1d1e047d00780efbe4c1c1ae6e4fecd3ab49e8 - src/nvidia/generated/g_os_desc_mem_nvoc.h
1ec59322d0874153252a387dcb50bf6d7328d56e - src/nvidia/generated/g_system_mem_nvoc.c
21e57b9c63e847eeb5a29c218db2c5c37db83298 - src/nvidia/generated/g_gpu_nvoc.c
4613f3d42dbc899b278fca71c3aaae79159d7dbe - src/nvidia/generated/g_gpu_user_shared_data_nvoc.c
b55573cb02ff8129aa4f5aa050ac53d1f4fcfdb2 - src/nvidia/generated/g_rs_resource_nvoc.h
16c8d551a3a908ec194d39c88c5603cea436c9b7 - src/nvidia/generated/g_binary_api_nvoc.c
a232e1da560db2322a921a9f0dc260ad703af2b4 - src/nvidia/generated/g_mem_nvoc.h
c503ca5954b8f6ebdba96904a1616a55ce08a2d3 - src/nvidia/generated/g_device_nvoc.c
e7cc58e9f8173583bd253fa73df56324e48aa5ad - src/nvidia/generated/g_io_vaspace_nvoc.h
b93ab0b9e39ca3c5b397cbdba58e4d9894d4130f - src/nvidia/generated/g_rpc-structures.h
afda2b8579ed309e23be0ad1a835ee84fcbe535f - src/nvidia/generated/g_client_nvoc.h
e97edab623386f7d1534b4f053a66fc8659167f6 - src/nvidia/generated/g_event_nvoc.h
f4b2bffbdbb2b0b398e8dfe3420e46b2bf27839c - src/nvidia/generated/g_hal_nvoc.h
4626f4a1a4eadc3695d79454db25bd0153d1165d - src/nvidia/generated/g_resource_fwd_decls_nvoc.h
30035e0fb1ae8b816fc42b78a17eb30462640ce4 - src/nvidia/generated/g_kernel_head_nvoc.h
52ae6273ddf101e9715aed99991506cad8e96859 - src/nvidia/generated/g_disp_inst_mem_nvoc.c
abc769851bd523ee08cf829bf3864cf5475066ec - src/nvidia/generated/g_subdevice_nvoc.h
255c404719b18c2a3aec2a47948c0fbcf4affd4b - src/nvidia/generated/rmconfig.h
c7fda8cbe109ad2736694ce9ec0e2ab93d0e3f2c - src/nvidia/generated/g_mem_list_nvoc.h
f9bdef39159a8475626a0edcbc3a53505a0ff80a - src/nvidia/generated/g_os_hal.h
dc7bbba203ee5ff91b6f14eb3abfad8c15854e1d - src/nvidia/generated/g_mem_desc_nvoc.h
1702c9d021149c0f5c73ebeda7bea29e246af31d - src/nvidia/generated/g_nv_name_released.h
2e0c45e4186d44774286a71daf797c980c2ddf7a - src/nvidia/generated/g_objtmr_nvoc.c
9b78bc02a8fe0ec297167bb4bdb7f8255b94198b - src/nvidia/generated/g_disp_capabilities_nvoc.h
967d8c0d7d5c1271e82f30af992f48322695d367 - src/nvidia/generated/g_eng_state_nvoc.h
831cdf0767703c00918e70ef3933716b201781f1 - src/nvidia/generated/g_syncpoint_mem_nvoc.c
ce74dbd8f88f50af0b3ea3b3034395cd98eb08e8 - src/nvidia/generated/g_gpu_access_nvoc.c
08ad957117efefe2e04448bce1cad2dec0e984af - src/nvidia/generated/g_odb.h
033a6d6bac0829783afe8a582fa6c4f329be7f04 - src/nvidia/generated/g_hypervisor_nvoc.h
c1471919f6c19e1b576b7c636ba5ae7ab9d58177 - src/nvidia/generated/g_gpu_db_nvoc.c
f68b7e209e268d14b0b98686d1766683139b9b5f - src/nvidia/generated/g_system_nvoc.c
cdcab5a0094b9e9664f7a0e62ec31783617de5ab - src/nvidia/generated/g_code_coverage_mgr_nvoc.h
5e614b6db957a0ae77502ca6d5966bca506f8020 - src/nvidia/generated/g_gpu_group_nvoc.h
eb15207a28b8eed41182de6311ec48f5e321729f - src/nvidia/generated/g_gpu_user_shared_data_nvoc.h
ef9def144aaf1b2b292c9815c68a6007eff56dda - src/nvidia/generated/g_rs_server_nvoc.c
eb07ee114f8cfc039978cdb7501c3ea03c879864 - src/nvidia/generated/g_generic_engine_nvoc.c
d2f3d17e05337992bc031c823186583d62c10235 - src/nvidia/generated/g_chips2halspec_nvoc.h
ad94c2430328b91392db363158fa2279b794cc54 - src/nvidia/generated/g_gpu_resource_nvoc.h
c77048521f9c9890f14108c2c5457d78a85fe69d - src/nvidia/generated/g_gpu_access_nvoc.h
38a98487eec65d8807e47f99b013619c1537e983 - src/nvidia/generated/g_dce_client_nvoc.c
d09bde39b1f12490ea0a696d6915d521c9f13953 - src/nvidia/generated/g_rpc-message-header.h
9becba61ba5ff7580b353abfb87cbe0f37817195 - src/nvidia/generated/g_binary_api_nvoc.h
50f70075eac2515b189e2d07a06b13cfa826945f - src/nvidia/generated/g_rs_client_nvoc.h
f8b984c6bc09554753cfe6692dde2eb3171abc57 - src/nvidia/generated/g_disp_channel_nvoc.h
4931b316fc042705a5f094c8c23b0038f980b404 - src/nvidia/generated/g_generic_engine_nvoc.h
2a28557874bd51f567ef42c75fd4e3b09d8ad44d - src/nvidia/generated/g_gpu_arch_nvoc.c
a17058fe665949f1e3861fe092e29b229cefbe62 - src/nvidia/generated/g_mem_mgr_nvoc.h
7aa02b964507a8269d35dc56170955025b98bd1a - src/nvidia/generated/g_gpu_arch_nvoc.h
0b9296f7797325b80ff0900f19a3763b564eb26b - src/nvidia/generated/g_context_dma_nvoc.h
4210ff36876e84e0adf1e9d4afb6654c7e6e5060 - src/nvidia/generated/g_resserv_nvoc.h
3613b4ec9b285a4e29edefa833704789c887c189 - src/nvidia/generated/g_tmr_nvoc.c
517b6b986a3749c9a6dd0f22bbef6569cdb48d97 - src/nvidia/generated/g_rs_client_nvoc.c
7670f19682bcd6224c999a8f80e770368e735632 - src/nvidia/generated/g_lock_stress_nvoc.c
b348b1b465cb359ca3cf10f5e121714ffb95b582 - src/nvidia/generated/g_standard_mem_nvoc.c
54fa23e7cf0f07d625c25d5c08dad9cd1714f851 - src/nvidia/generated/g_standard_mem_nvoc.h
7e528d775caa7ff2bf4159c94fc2c2e4d3aadffc - src/nvidia/generated/g_chips2halspec_nvoc.c
40aa2c65168c893c725c983b2219ceff03d05608 - src/nvidia/generated/g_gpu_halspec_nvoc.h
17c4ce5e67bf8bc8f48a4e2b1b7752d4597703ad - src/nvidia/generated/g_kernel_head_nvoc.c
3ad8329c7f7d63633b7abf2cdd502e4257fa1726 - src/nvidia/generated/g_event_nvoc.c
7aba35752cd4c6447f844cd9432d7dc1bc77b33d - src/nvidia/generated/g_disp_capabilities_nvoc.c
fa3a5418a5d6bd7fb2b375ed7f7b64293fdf5f86 - src/nvidia/generated/g_ioaccess_nvoc.h
3c3961ddf6422294c3322e3b0a3c97ee94bfd010 - src/nvidia/generated/g_gpu_mgr_nvoc.c
b73b22368abf741cc0a5108b6c9585a81de28b57 - src/nvidia/generated/g_hal.h
6e219df1367ce7dc8f5f4a1f2209a7808a927871 - src/nvidia/generated/g_hal_mgr_nvoc.c
279538daf54163a7a53aab1330fba2c00fc3f234 - src/nvidia/generated/g_rmconfig_util.c
49e84272bbce137683232275b4f13a19c644c650 - src/nvidia/generated/g_prereq_tracker_nvoc.h
57eb0772bc280690eade3f5d54f786e252c75099 - src/nvidia/generated/g_object_nvoc.c
113297c44e702cd6535e007c1c5b2dd5e6f809dc - src/nvidia/generated/g_ioaccess_nvoc.c
216040d1883e8c4f1e8b47d9f6b279ec111d094d - src/nvidia/generated/g_hal_mgr_nvoc.h
113b10cf6cef2608ff4a288e2944d56da64f355d - src/nvidia/generated/g_gpu_group_nvoc.c
86bb88ccdfa34510d4acf21684e5b8bd32d820b2 - src/nvidia/generated/g_disp_sf_user_nvoc.h
5c0ed2e135f53ca09fbfb542bea88b304a2e1208 - src/nvidia/generated/g_event_buffer_nvoc.c
979082b8c018eee55d880265f7bfd294360816c6 - src/nvidia/generated/g_hda_codec_api_nvoc.c
f917323efc9429fcea8643eb9a8d5ee46b1b50a5 - src/nvidia/generated/g_eng_state_nvoc.c
437329a9c6e35e4b02945ec035448e704521280e - src/nvidia/generated/g_hda_codec_api_nvoc.h
fba7a2891fe10e837f5897034b8176a7307fbb12 - src/nvidia/generated/g_lock_test_nvoc.h
05269b7e73347b580f11decf0e1b9f467d0cb60c - src/nvidia/generated/g_dce_client_nvoc.h
e175ab2ef1fd5b64c9f0d665a26b2ed6f864b106 - src/nvidia/generated/g_vaspace_nvoc.h
cc7ec616b034ec01da1c5176b6c62759c3f31a06 - src/nvidia/generated/g_subdevice_nvoc.c
93f9738c0e8aa715592306ddf023adf6b548dcc4 - src/nvidia/generated/g_nvh_state.h
1745f3002758556d1b6d11a24d088ef87ba18bd5 - src/nvidia/generated/g_virt_mem_mgr_nvoc.c
8c9f26e959fa9a6a3c4a5cb8875458cc4a9bfe9e - src/nvidia/generated/g_os_nvoc.c
3b0e038829647cfe0d8807579db33416a420d1d2 - src/nvidia/generated/g_chips2halspec.h
a1fad555b8ad36437992afdd6e3e08d236167ac7 - src/nvidia/generated/g_journal_nvoc.h
d210a82e3dda39239201cfc1c2fcb2e971915c1e - src/nvidia/generated/g_device_nvoc.h
836f88914b046eadad9435786e1b474ee6690f5f - src/nvidia/generated/g_gpu_nvoc.h
ea0d27b0f05818e2e44be7d04b31f8843e1d05b7 - src/nvidia/generated/g_io_vaspace_nvoc.c
10529db24fb0501aa7f2aae25e0a87247ab5405c - src/nvidia/generated/g_resource_nvoc.h
5d47bed309c731bfee4144f61093192e7efcaa55 - src/nvidia/generated/g_disp_channel_nvoc.c
8771d8f2cf58f5e1d91ece01c1962677cebc5e4b - src/nvidia/generated/g_rmconfig_private.h
951c1c8969a621344d4d2a3ec61b1ad51b39ea79 - src/nvidia/generated/g_client_resource_nvoc.c
629b6daac6c9215dc982973b6adcf84314d34d57 - src/nvidia/generated/g_gpu_halspec_nvoc.c
29d5ccf874298c8156314a6eb23c209f2920b779 - src/nvidia/generated/g_gpu_resource_nvoc.c
fc26ab853e7c981c271ced30dfd78d95cd9bcdfd - src/nvidia/generated/g_gpu_db_nvoc.h
aa76beb8b33254fae884434b688093f9c7f12c87 - src/nvidia/generated/g_hal_private.h
86739259b5059c9b9ea3061bd8d1846385cb95f4 - src/nvidia/generated/g_sdk-structures.h
41bc858f6aca964a8977ad96911ecf1e8b46385d - src/nvidia/generated/g_hal_archimpl.h
f87916eae53dbea2f6bdbe80a0e53ecc2071d9fd - src/nvidia/generated/g_lock_test_nvoc.c
6b8597803d509372152e3915f15139186294add5 - src/nvidia/generated/g_gpu_class_list.c
2101385d1332db9a2902370a6b3c6117ca8b2737 - src/nvidia/generated/g_kern_disp_nvoc.h
d71ff42bc0fc0faf1999a6cbe88c4492a47e200e - src/nvidia/generated/g_os_nvoc.h
e58abb783f7561d0af925c2fca392c5165fcb199 - src/nvidia/generated/g_kern_disp_nvoc.c
d6a34926ab710156c9c4b2d9f12a44e6dafd43d1 - src/nvidia/generated/g_tmr_nvoc.h
c4c67b0e0284656b32c7b4547e22d521c442124a - src/nvidia/generated/g_disp_objs_nvoc.h
8e49b4d77641c98c6101dbc88a79290ceca6271a - src/nvidia/generated/g_rs_server_nvoc.h
af206c390549eff5d690ad07f3e58cd417f07f5f - src/nvidia/generated/g_hal_register.h
be659882e731b6a2019639265af46239c5c96ebf - src/nvidia/generated/g_hal_nvoc.c
db76e8669776fbfa901c60d9b9908af2fabc4703 - src/nvidia/generated/g_virt_mem_mgr_nvoc.h
797bd0197236fb0afc2c7e052487db803ac5baf0 - src/nvidia/generated/g_rs_resource_nvoc.c
884bed29fb4735ae0b4504fc874702acd29ee541 - src/nvidia/generated/g_mem_mgr_nvoc.c
3168beb42f15591a50339692d502e04977615a7b - src/nvidia/generated/g_prereq_tracker_nvoc.c
8e0071daaf5471a0fb3856705ec993704eaed4b5 - src/nvidia/generated/g_disp_inst_mem_nvoc.h
fb464cf839a1e76ac2a27346c7cd46ca921f1f56 - src/nvidia/generated/g_traceable_nvoc.c
8588d6f88ab5e8682952063fe0e2c840b334c622 - src/nvidia/generated/g_eng_desc_nvoc.h
de99523103dd7df0934cbe7aa21179ec7f241817 - src/nvidia/generated/g_os_desc_mem_nvoc.c
aa43dd8bdbdc71dc64d65e948221c7d5235588e7 - src/nvidia/generated/g_disp_objs_nvoc.c
9b6cc3a5e9e35139e9245cbe753fe9a552a488c0 - src/nvidia/generated/g_syncpoint_mem_nvoc.h
ae311b0968df9e9c9c2cec89e3060c472fc70a4c - src/nvidia/generated/g_mem_nvoc.c
dc7a782be9a0096701771cb9b2dc020c2f814e6d - src/nvidia/generated/g_system_nvoc.h
93a47004dd1c7529c6ee5f8abdf8b49c336fb681 - src/nvidia/generated/g_disp_sf_user_nvoc.c
3b5dfad8fccd7251cc177c7ea1b90265b4b6c901 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.h
b53ec15a1aaf102d42b79881cd1b270afeb7205c - src/nvidia/generated/g_system_mem_nvoc.h
67b2d3ea81ebe7be679bcafc688ced0d64f16edf - src/nvidia/generated/g_object_nvoc.h
b1be7145e70d8811fbdbe07c0e99f32ad0e38429 - src/nvidia/generated/g_client_resource_nvoc.h
0d5b87b117d39b173a2a21a5cd71572bc2b26697 - src/nvidia/generated/g_resource_nvoc.c
51df7972f9932c2a5d800d4e2b3e4828e5aa2038 - src/nvidia/generated/g_vaspace_nvoc.c
0820fa0a975b2474ce0fdf64508cbd7758f60e5c - src/nvidia/generated/g_ref_count_nvoc.h
fff3ebc8527b34f8c463daad4d20ee5e33321344 - src/nvidia/inc/lib/ref_count.h
ec26741397ebd68078e8b5e34da3b3c889681b70 - src/nvidia/inc/lib/base_utils.h
f8d9eb5f6a6883de962b63b4b7de35c01b20182f - src/nvidia/inc/lib/protobuf/prb.h
601edb7333b87349d791d430f1cac84fb6fbb919 - src/nvidia/inc/lib/zlib/inflate.h
671c628ff9d4e8075f953766adcab9bfc54bd67c - src/nvidia/inc/libraries/poolalloc.h
1e8730e4abd210e3c648ef999ccc2b1f1839b94c - src/nvidia/inc/libraries/field_desc.h
8dd7f2d9956278ed036bbc288bff4dde86a9b509 - src/nvidia/inc/libraries/eventbufferproducer.h
1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - src/nvidia/inc/libraries/nvoc/utility.h
d3cd73c0c97a291e76e28a6e3834d666e6452172 - src/nvidia/inc/libraries/nvoc/prelude.h
79b556739f0648cec938f281794663433fc5e048 - src/nvidia/inc/libraries/nvoc/runtime.h
91c67f272f0ada6f386e9f4a78fbde70aa5c883d - src/nvidia/inc/libraries/nvoc/object.h
c0f66cf7b2fb6ca24b5d4badede9dcac0e3b8311 - src/nvidia/inc/libraries/nvoc/rtti.h
a3db778e81f7188a700e008e4c5f5b1320ab811e - src/nvidia/inc/libraries/mmu/gmmu_fmt.h
1daea206ab581fa3554ff1811e1253a7d0053ac0 - src/nvidia/inc/libraries/mmu/mmu_fmt.h
56b8bae7756ed36d0831f76f95033f74eaab01db - src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h
b8e52b576e6668e4de7ea65a31e12c2bb491a591 - src/nvidia/inc/libraries/mapping_reuse/mapping_reuse.h
e772583f7fbf994fcf923d527d42372a716b4c57 - src/nvidia/inc/libraries/ioaccess/ioaccess.h
26853c886d848fb88e14da3aceab23f90589c05d - src/nvidia/inc/libraries/utils/nvprintf_level.h
c314121149d3b28e58a62e2ccf81bf6904d1e4bc - src/nvidia/inc/libraries/utils/nvmacro.h
72dcc09b77608263573bd34adf09393328eddf86 - src/nvidia/inc/libraries/utils/nvrange.h
b598ccd2721892b6915d4be432f1fc332477b666 - src/nvidia/inc/libraries/utils/nvbitvector.h
9aa5870d052a45c2489a6ea1a4f2e30fbc52d6be - src/nvidia/inc/libraries/utils/nv_enum.h
4849eb6c567e3ba952c22e702461c1a84ec88c6a - src/nvidia/inc/libraries/utils/nvprintf.h
1b265cb4fcc628862e4b27ae63a897871987eb76 - src/nvidia/inc/libraries/utils/nvassert.h
39113db75fdab5a42f9d8653ed1c90018b8b1df4 - src/nvidia/inc/libraries/containers/map.h
11ce1423312f4c34df19672e45678d0531cc299d - src/nvidia/inc/libraries/containers/ringbuf.h
5f116730f8b7a46e9875850e9b6ffb2a908ad6c2 - src/nvidia/inc/libraries/containers/btree.h
fc211c8276ebcee194080140b5f3c30fba3dfe49 - src/nvidia/inc/libraries/containers/queue.h
661b551f4795f076d7d4c4dab8a2ae2f52b0af06 - src/nvidia/inc/libraries/containers/list.h
47c69b04f95664e742f1a0a02711eeb1fb71000b - src/nvidia/inc/libraries/containers/eheap_old.h
5da20ecad3ff8405dea782792c6397d21ba76f7c - src/nvidia/inc/libraries/containers/vector.h
bcfc41a04576a4244c9dc3fe2a32c8e582f16c3e - src/nvidia/inc/libraries/containers/type_safety.h
5cabf8b70c3bb188022db16f6ff96bcae7d7fe21 - src/nvidia/inc/libraries/containers/multimap.h
4e26106c9c758c9e48418451ac01cf591ed74a31 - src/nvidia/inc/libraries/nvlog/nvlog_printf.h
41843197a5c11abc93df89b8f10a5f815e7fe6af - src/nvidia/inc/libraries/nvlog/nvlog.h
13aedc8ccf6acdd71be71b2219f79cd1af411273 - src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h
7c9c9456aaacbeffa11a9af54fe2250095ebbb00 - src/nvidia/inc/libraries/tls/tls.h
87a130551593551380ac3e408f8044cc0423c01a - src/nvidia/inc/libraries/nvport/nvport.h
2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - src/nvidia/inc/libraries/nvport/safe.h
4bf45849bc1c6b89d7a79d761cce84a1d5026eac - src/nvidia/inc/libraries/nvport/debug.h
147d47ef4bd860394d1d8ae82c68d97887e2898b - src/nvidia/inc/libraries/nvport/core.h
6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - src/nvidia/inc/libraries/nvport/thread.h
6065fa9a525d80f9b61acb19e476066823df0700 - src/nvidia/inc/libraries/nvport/sync.h
a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - src/nvidia/inc/libraries/nvport/util.h
fb5a011275328b7c1edc55abc62e604462b37673 - src/nvidia/inc/libraries/nvport/atomic.h
16a35b2b6fd6eb855acd64d72480b285795f54b2 - src/nvidia/inc/libraries/nvport/memory.h
f31ed19d0588861b8c2b1489dd4e70d430110db5 - src/nvidia/inc/libraries/nvport/crypto.h
96c7c30c9f6503675f0903a16207a0ac06a6963d - src/nvidia/inc/libraries/nvport/cpu.h
53d843988669f61528cd45099ced749defa4cf7e - src/nvidia/inc/libraries/nvport/string.h
d1863efe7b8a63f1c5a7f47856b95ad31fd1a561 - src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h
9596b274389ea56acff6ca81db8201f41f2dd39d - src/nvidia/inc/libraries/nvport/inline/atomic_clang.h
a8c9b83169aceb5f97d9f7a411db449496dc18f6 - src/nvidia/inc/libraries/nvport/inline/util_generic.h
bbece45965ffbc85fbd383a8a7c30890c6074b21 - src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h
a7cb79bf7ac48e0f5642ecfd2e430bb85587dddf - src/nvidia/inc/libraries/nvport/inline/memory_tracking.h
1d6a239ed6c8dab1397f056a81ff456141ec7f9c - src/nvidia/inc/libraries/nvport/inline/util_valist.h
f267235fd8690e1b1d7485d3a815841607683671 - src/nvidia/inc/libraries/nvport/inline/safe_generic.h
645734ed505a4d977490e54b26cdf49657e20506 - src/nvidia/inc/libraries/nvport/inline/sync_tracking.h
a902e0f4265bd3dbd251afefa8ceb0389464d886 - src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h
2dec1c73507f66736674d203cc4a00813ccb11bc - src/nvidia/inc/libraries/resserv/rs_domain.h
fa5a5d8fa07cae6b8ef9d9135dc5d7e7624533d2 - src/nvidia/inc/libraries/resserv/resserv.h
972165721958839bc1d510fda9409d35ff89ec21 - src/nvidia/inc/libraries/resserv/rs_server.h
883bf7295d707014278e035f670d151275975d18 - src/nvidia/inc/libraries/resserv/rs_resource.h
2ad85ddca7cd230cea917e249871277ef1e59db1 - src/nvidia/inc/libraries/resserv/rs_client.h
cd033fe116a41285a979e629a2ee7b11ec99369f - src/nvidia/inc/libraries/resserv/rs_access_rights.h
df174d6b4f718ef699ca6f38c16aaeffa111ad3c - src/nvidia/inc/libraries/resserv/rs_access_map.h
5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - src/nvidia/inc/os/dce_rm_client_ipc.h
4aa45a3755ef172aa35279e87dd5cd83cab1bc2e - src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h
f2fd94a00e5debf1dc7f7ad4c00d417552fb0554 - src/nvidia/inc/kernel/vgpu/rpc.h
37598b6c25aac1a07cbc2bc5c76ebecdbca56eb6 - src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h
fea4bbeb739723d3b80b5b3d8943e746e58fae07 - src/nvidia/inc/kernel/vgpu/dev_vgpu.h
f64d3723d0c475558bed799da8d2c5ec32a7d3a8 - src/nvidia/inc/kernel/vgpu/vgpuapi.h
8bf8282ce6112a2afb2e7f64d138d6ce90cf37c0 - src/nvidia/inc/kernel/vgpu/rpc_global_enums.h
69360faa428e157580fac445bcf601f44f7646c0 - src/nvidia/inc/kernel/vgpu/rpc_headers.h
b9af629ab29b527f7830b78f52b55b8535b8dbfd - src/nvidia/inc/kernel/vgpu/vgpu_util.h
e33b5b8c324c23d28e91324a87b47a24823dc5f5 - src/nvidia/inc/kernel/vgpu/rpc_vgpu.h
af9d17b204fdddc6f97280fdafd5a414ee8274dc - src/nvidia/inc/kernel/diagnostics/code_coverage_mgr.h
c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - src/nvidia/inc/kernel/diagnostics/traceable.h
fd780f85cb1cd0fd3914fa31d1bd4933437b791d - src/nvidia/inc/kernel/diagnostics/tracer.h
7e75b5d99376fba058b31996d49449f8fe62d3f0 - src/nvidia/inc/kernel/diagnostics/profiler.h
7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - src/nvidia/inc/kernel/diagnostics/journal.h
b259f23312abe56d34a8f0da36ef549ef60ba5b0 - src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h
7f3f19ed69089ba05f5cac44982547718dbf4662 - src/nvidia/inc/kernel/diagnostics/xid_context.h
3a28bf1692efb34d2161907c3781401951cc2d4f - src/nvidia/inc/kernel/diagnostics/journal_structs.h
8ef620afdf720259cead00d20fae73d31e59c2f7 - src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h
701375e96d771b4105f5fe4949ed4a542be4f3d7 - src/nvidia/inc/kernel/os/os_stub.h
408c0340350b813c3cba17fd36171075e156df72 - src/nvidia/inc/kernel/os/os.h
c8496199cd808ed4c79d8e149961e721ad96714e - src/nvidia/inc/kernel/os/capability.h
cda75171ca7d8bf920aab6d56ef9aadec16fd15d - src/nvidia/inc/kernel/os/nv_memory_type.h
70b67003fda6bdb8a01fa1e41c3b0e25136a856c - src/nvidia/inc/kernel/os/nv_memory_area.h
497492340cea19a93b62da69ca2000b811c8f5d6 - src/nvidia/inc/kernel/rmapi/event_buffer.h
499c3d0d76276ee9441d57948ea97877c48b1daa - src/nvidia/inc/kernel/rmapi/rmapi.h
b4bae9ea958b4d014908459e08c93319784c47dd - src/nvidia/inc/kernel/rmapi/event.h
0500c41247fdecd66f25428d279c6dab72bab13e - src/nvidia/inc/kernel/rmapi/binary_api.h
61e3704cd51161c9804cb168d5ce4553b7311973 - src/nvidia/inc/kernel/rmapi/resource.h
2baec15f4c68a9c59dd107a0db288e39914e6737 - src/nvidia/inc/kernel/rmapi/client.h
ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - src/nvidia/inc/kernel/rmapi/exports.h
835f193521f216d29c678a6018cd9791914b6c01 - src/nvidia/inc/kernel/rmapi/lock_stress.h
b9ff9b201bf2df8651f0c408158aa617638868f6 - src/nvidia/inc/kernel/rmapi/rmapi_specific.h
20adc296ffe79f27d5c24c70716c972a2e0c9a5d - src/nvidia/inc/kernel/rmapi/control.h
deed1715907c1dab8e3304bd4f63b688b72104b7 - src/nvidia/inc/kernel/rmapi/mapping_list.h
4453fe6463e3155063f2bdbf36f44697606a80a5 - src/nvidia/inc/kernel/rmapi/client_resource.h
6cc2de07b21fb21cef1b5b87fb2f1c935782262c - src/nvidia/inc/kernel/rmapi/rs_utils.h
35a65c31c6dcc2824011245ff6e2d5a30f95525c - src/nvidia/inc/kernel/rmapi/rmapi_utils.h
a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h
23e243f9abcb2a4f2d10d141303cd55677b04436 - src/nvidia/inc/kernel/rmapi/rmapi_cache_handlers.h
2724476b61b1790f1b7c293cc86e8a268125e11c - src/nvidia/inc/kernel/rmapi/param_copy.h
15f788614e08d805e963653460858cf013fe0178 - src/nvidia/inc/kernel/rmapi/lock_test.h
2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - src/nvidia/inc/kernel/rmapi/alloc_size.h
893ec596aab365c2ff393bf2b96aea57f37d01f8 - src/nvidia/inc/kernel/platform/nvpcf.h
5e9928552086947b10092792db4a8c4c57a84adf - src/nvidia/inc/kernel/platform/acpi_common.h
e762205698aff945603324331b443bb2f20cf778 - src/nvidia/inc/kernel/platform/sli/sli.h
15754215ec49815f547dd999b2262a34670dde0b - src/nvidia/inc/kernel/core/locks.h
bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - src/nvidia/inc/kernel/core/hal.h
ad378b09a277fba0efd3291d167e1d21071bdf1b - src/nvidia/inc/kernel/core/printf.h
a054be86a4476ba7b9a97052dfcfa4155e059cb9 - src/nvidia/inc/kernel/core/info_block.h
bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - src/nvidia/inc/kernel/core/core.h
37f267155ddfc3db38f110dbb0397f0463d055ff - src/nvidia/inc/kernel/core/strict.h
b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - src/nvidia/inc/kernel/core/hal_mgr.h
2d741243a6ae800052ddd478cc6aa7ad0b18f112 - src/nvidia/inc/kernel/core/prelude.h
ebc7c06d9e94218af4cf6b0c03e83650e391e5bc - src/nvidia/inc/kernel/core/thread_state.h
b5859c7862fb3eeb266f7213845885789801194a - src/nvidia/inc/kernel/core/system.h
07f45cd5fab5814e21b9e84425564b43776118fd - src/nvidia/inc/kernel/gpu/gpu_resource_desc.h
7010ff346c27b6453c091f5577672b8b1821808d - src/nvidia/inc/kernel/gpu/gpu_access.h
10ba0b9d4c67c8027b391073dab8dc4388f32fd7 - src/nvidia/inc/kernel/gpu/nvbitmask.h
59f72837997cb0c8ffc491d9a61c61e61b9dca94 - src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h
bca121fb72d54afd714654f1a50eb7192da3135f - src/nvidia/inc/kernel/gpu/gpu_uuid.h
3f0f23a15201105779f3d25dc7628b42990c4b7e - src/nvidia/inc/kernel/gpu/gpu_timeout.h
1ac9c8bf155d1f25f790032b2b6306223199d9ff - src/nvidia/inc/kernel/gpu/gpu_arch.h
f17b704f2489ffedcc057d4a6da77c42ece42923 - src/nvidia/inc/kernel/gpu/gpu_resource.h
28d0d82b58ef13662e8896d3bbc42d340836294e - src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h
e33e4d1537839e41898ff0fab8949e90ee1aed46 - src/nvidia/inc/kernel/gpu/gpu_device_mapping.h
426c6ab6cecc3b1ba540b01309d1603301a86db1 - src/nvidia/inc/kernel/gpu/eng_desc.h
5f5677bee452c64a1b890c3eb65e81fda66ddbaa - src/nvidia/inc/kernel/gpu/error_cont.h
d624e0c45cc8ad24e8c0b2fb5281c0c8a1c7a6d3 - src/nvidia/inc/kernel/gpu/gpu_engine_type.h
c33ab6494c9423c327707fce2bcb771328984a3c - src/nvidia/inc/kernel/gpu/gpu_halspec.h
145b1bc37e6c36b466ea33dd0579d22b530d8dd3 - src/nvidia/inc/kernel/gpu/kern_gpu_power.h
c771936af1de030194894db1312d847038ddb0cb - src/nvidia/inc/kernel/gpu/gpu_child_list.h
0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - src/nvidia/inc/kernel/gpu/eng_state.h
76b24227c65570898c19e16bf35b2cad143f3d05 - src/nvidia/inc/kernel/gpu/gpu.h
0a0c9a8f27feec3e90e15ce9879532ec77450de5 - src/nvidia/inc/kernel/gpu/gpu_acpi_data.h
9ed922ffed4454a10c5e2d8b3123ed653ec653e4 - src/nvidia/inc/kernel/gpu/gpu_ecc.h
f2947fefcaf0611cd80c2c88ce3fdea70953c1ed - src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h
efc50bb2ff6ccf1b7715fd413ca680034920758e - src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h
24d01769b39a6dd62574a95fad64443b05872151 - src/nvidia/inc/kernel/gpu/subdevice/subdevice.h
576216219d27aa887beeccefc22bcead4d1234d7 - src/nvidia/inc/kernel/gpu/disp/kern_disp.h
277a2719f8c063037c6a9ed55ade2b1cb17f48ae - src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h
51a209575d3e3fe8feb7269ece7df0846e18ca2a - src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h
d0899f0e55e6675e267d4c72577be52e39b66121 - src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h
be7da8d1106ee14ff808d86abffb86794299b2df - src/nvidia/inc/kernel/gpu/disp/disp_objs.h
74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - src/nvidia/inc/kernel/gpu/disp/disp_channel.h
b39826404d84e0850aa3385691d8dde6e30d70d4 - src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h
24397d051c941427e54cefc1062d8cd977a8725e - src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h
9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h
5179f01acf7e9e251552dc17c0dcd84f7d341d82 - src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h
22fc153d91a3917ac8e3f2aa94f0d52bfb11f7c2 - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_commands_responses.h
173e9ecd2224a5259c79f2491302ba4415e82f70 - src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h
3118f2e9b47cfac98a92d195ce67ea63e50bf3ab - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_common.h
1feab39692ea8796ac7675f4780dfd51e6e16326 - src/nvidia/inc/kernel/gpu/timer/objtmr.h
0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - src/nvidia/inc/kernel/gpu/timer/tmr.h
71dd4fccd3b601508230a2b8b720aaf531a160ff - src/nvidia/inc/kernel/gpu/gsp/gsp_trace_rats_macro.h
e1979c71f3d5ffc92bf2306f9360b70bca0edf1f - src/nvidia/inc/kernel/gpu/gsp/message_queue.h
23d38dc3e66affac9342a839f5ba0d79a40f63ba - src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h
bb9b8ec9840109b15c174da02e7ac85c1e2c0c70 - src/nvidia/inc/kernel/gpu/rpc/objrpc.h
1cc21ad9136024f7437ef745db6652343588c50a - src/nvidia/inc/kernel/gpu/rpc/objrpcstructurecopy.h
7b7cf3b6459711065d1b849bf5acaea10b6400ca - src/nvidia/inc/kernel/gpu/intr/intr_common.h
1e3bebe46b7f2f542eedace554a4156b3afb51f1 - src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h
97d0a067e89251672f191788abe81cf26dcb335f - src/nvidia/inc/kernel/gpu/device/device.h
889ba18a43cc2b5c5e970a90ddcb770ce873b785 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h
6756126ddd616d6393037bebf371fceacaf3a9f1 - src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h
e4c67260b5cb693d695ad3d8aa96aaed45688322 - src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h
20416f7239833dcaa743bbf988702610e9251289 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h
407cad27681bde8235305464150e275a4a93b5d5 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
5be45f3abdbb65a8eea959d98499ea8ff9a79de9 - src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h
76de30ac7b722cc5d59fc834d6b9c795ec14d7a5 - src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h
ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - src/nvidia/inc/kernel/gpu/dce_client/dce_client.h
2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h
5b151d0d97b83c9fb76b76c476947f9e15e774ad - src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h
e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - src/nvidia/inc/kernel/gpu_mgr/gpu_db.h
ea32018e3464bb1ac792e39227badf482fa2dc67 - src/nvidia/inc/kernel/gpu_mgr/gpu_group.h
02d6a37ef1bb057604cb98a905fa02429f200c96 - src/nvidia/inc/kernel/mem_mgr/mem.h
a5f49a031db4171228a27482d091283e84632ace - src/nvidia/inc/kernel/mem_mgr/system_mem.h
d15991bc770c5ab41fe746995294c5213efa056b - src/nvidia/inc/kernel/mem_mgr/io_vaspace.h
5ae08b2077506cbc41e40e1b3672e615ce9d910f - src/nvidia/inc/kernel/mem_mgr/vaspace.h
0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h
4c386104eaead66c66df11258c3f1182b46e96ee - src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h
1a08e83fd6f0a072d6887c60c529e29211bcd007 - src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h
2d4afabd63699feec3aea5e89601db009fc51a08 - src/nvidia/inc/kernel/mem_mgr/standard_mem.h
24928c8b4e8b238f1921a1699f3af59bcff994ed - src/nvidia/src/lib/base_utils.c
a6134d6f5f3e3b0b4c274eb3b2d0a146644c842b - src/nvidia/src/lib/zlib/inflate.c
2e57601af217d0d8c4986abb593e8864e53e7e0b - src/nvidia/src/libraries/nvoc/src/runtime.c
9ea8bf51c44e500c9963a12a1e2a71ebffe6c4e8 - src/nvidia/src/libraries/nvbitvector/nvbitvector.c
0e7a9b9c697f260438ca5fda8527b0f4edc2de13 - src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c
e5ead344020dfc973ee7c7383e0f687a29642683 - src/nvidia/src/libraries/mapping_reuse/mapping_reuse.c
3c885d2c0e6cfb3f8585bddcba128b02e0196167 - src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c
ee7ea17829dfbbf9e6cd8d6c6fb2ada086b5d36e - src/nvidia/src/libraries/ioaccess/ioaccess.c
ca2ba7f19b705e39dbb8890a84ce84d34fbd8aa4 - src/nvidia/src/libraries/utils/nvassert.c
864bd314450490b687a652335a44fb407835152c - src/nvidia/src/libraries/containers/ringbuf.c
eb919a9e8711830c1c3f7fe71273e0a39862292e - src/nvidia/src/libraries/containers/vector.c
53aa343682f721f57058c7a17b1e872ca6fe7cea - src/nvidia/src/libraries/containers/map.c
7f58f03ec069ad5f5c64fedf4a484cc93473bd04 - src/nvidia/src/libraries/containers/queue.c
23c328fc27ad0317efe6ccd2da71cfd9db9da236 - src/nvidia/src/libraries/containers/multimap.c
ae669a466f1fecf67746a9fafc8c1119294c93d7 - src/nvidia/src/libraries/containers/list.c
9c80df385a47834da4f92dc11053ca40a37a7fe7 - src/nvidia/src/libraries/containers/btree/btree.c
a0e23ad69d805a7de439f0fbf79241c6466efdc2 - src/nvidia/src/libraries/containers/eheap/eheap_old.c
cccb1fedee02a240692688090e00ac1e289dec9e - src/nvidia/src/libraries/tls/tls.c
a045a19d750d48387640ab659bb30f724c34b8c8 - src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c
d047abe66dd8a459c15224cc056fc6f2176b0c6a - src/nvidia/src/libraries/nvport/util/util_gcc_clang.c
f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - src/nvidia/src/libraries/nvport/util/util_compiler_switch.c
9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - src/nvidia/src/libraries/nvport/sync/sync_common.h
eb8b5fcab51c47f58a37958ddb38ff90991bcbbe - src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c
b2ae1406c94779f575d3e2233a7ab248ac10e74f - src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h
e2fec1a305dfec07456faec8ea5e75f601d76b5e - src/nvidia/src/libraries/nvport/memory/memory_tracking.c
c5a16e5bb7d304ffe5e83d7b27226cbecdbc7ce1 - src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c
db01179ad5e6333844bd3e31b62d0dc262c98875 - src/nvidia/src/libraries/nvport/memory/memory_generic.h
2c00bd224d17c0cc5469b5140f3be3d23b494922 - src/nvidia/src/libraries/nvport/string/string_generic.c
b387005657f81538fab5962d4aabbc5dc681aa1b - src/nvidia/src/libraries/nvport/core/core.c
702c73446bba35f88249cfe609ac0ca39dbd80ff - src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c
9ca28a5af5663dec54b4cd35f48a8a3d8e52e25f - src/nvidia/src/libraries/nvport/cpu/cpu_common.c
a305654bafc883ad28a134a04e83bbd409e0fc06 - src/nvidia/src/libraries/nvport/cpu/cpu_common.h
099c17e5931d5d881d8248ec68041fa0bbc2a9bc - src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c
1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - src/nvidia/src/libraries/resserv/src/rs_domain.c
f9cb28c60e7063ddb5b2a2af4a053a477c95c74b - src/nvidia/src/libraries/resserv/src/rs_server.c
dac54d97b38ad722198ec918668f175dc5122e4e - src/nvidia/src/libraries/resserv/src/rs_access_map.c
ede517ff5f53666a23ad2edec7e9fcd85c6ef7d1 - src/nvidia/src/libraries/resserv/src/rs_client.c
26d872a8495e38065af34aed9a60ab9a08898d40 - src/nvidia/src/libraries/resserv/src/rs_resource.c
408e1e5430e5e507e7e59adc292175150e50b825 - src/nvidia/src/libraries/resserv/src/rs_access_rights.c
304e2fb9bbf6d37358779d4e321f33ac76efcd39 - src/nvidia/src/kernel/diagnostics/nvlog.c
b3a29311cc22e2dae686f8ed2df6bc828aa826cf - src/nvidia/src/kernel/diagnostics/profiler.c
439543a41a36b0959b5f4c099f4adaa379b9f912 - src/nvidia/src/kernel/diagnostics/code_coverage_mgr.c
c1e5733847085bede6eb128eff3bad14549a31db - src/nvidia/src/kernel/diagnostics/nvlog_printf.c
d10c5031c3bc00ae1243729c39496df38d2c9ae3 - src/nvidia/src/kernel/os/os_init.c
2255d1ae2d942c3fed9a4b0a41020d0e49cb8648 - src/nvidia/src/kernel/os/os_timer.c
b887b661ffbe6c223c60f544b1fab32690cd8c75 - src/nvidia/src/kernel/os/os_sanity.c
f228bc86fd9149675cb554d6f596d81fdd4c3770 - src/nvidia/src/kernel/os/os_stubs.c
8800bf3ec679a1c3d36b89992b3f2f95365ec834 - src/nvidia/src/kernel/rmapi/entry_points.c
348c34e13f006f1320536876cb7393d8232e61de - src/nvidia/src/kernel/rmapi/rpc_common.c
8f033323f3ae264a79f779abb163442deb17e88a - src/nvidia/src/kernel/rmapi/rmapi.c
bc7c0b5bd06a1c58714b782d85f740632c6e152f - src/nvidia/src/kernel/rmapi/rmapi_cache_handlers.c
ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - src/nvidia/src/kernel/rmapi/deprecated_context.h
b1e57ee17d6641412a4065317be3b81e5db94824 - src/nvidia/src/kernel/rmapi/event_notification.c
a965c5f028c1d47d7da0dd03dabbf8aebc817523 - src/nvidia/src/kernel/rmapi/rs_utils.c
a2ad052692006f70e97fd3d186f19c7ddfe80c4c - src/nvidia/src/kernel/rmapi/deprecated_context.c
7a0a8914b407f836627d8262de2de6cab2dd691d - src/nvidia/src/kernel/rmapi/rmapi_specific.c
d915b65380b59e557e5043f839c42d4105caa111 - src/nvidia/src/kernel/rmapi/rmapi_utils.c
2c5b12d5eb17c313138262cd1e42eb940a4d9ed8 - src/nvidia/src/kernel/rmapi/client.c
ab24efdee819d113fe72ec12c0e359c514151336 - src/nvidia/src/kernel/rmapi/resource_desc_flags.h
1745523e56fc0ff5a45d4b2473e13f0cc6f2afb1 - src/nvidia/src/kernel/rmapi/event_buffer.c
f70b6d7e8f21bf26d9c8171d62cbdf934fe3a30e - src/nvidia/src/kernel/rmapi/rmapi_stubs.c
09fc97bd7daa74a0b2e55fc5632b2f25464412dc - src/nvidia/src/kernel/rmapi/client_resource.c
c21223701bd7afd09e706616105f3f5f365afa5d - src/nvidia/src/kernel/rmapi/rmapi_finn.c
433c6091b3b986151e27ea952cef1dc83ff3095c - src/nvidia/src/kernel/rmapi/lock_test.c
682977753c878ccee6279e539cf11bee2b548752 - src/nvidia/src/kernel/rmapi/resource_desc.c
6dc3f6642c450043cc9b361037f4cb2091e7cb58 - src/nvidia/src/kernel/rmapi/sharing.c
00a6ef509ed8484d038c54b47642bc1a00125077 - src/nvidia/src/kernel/rmapi/lock_stress.c
3b53d6b8ef183702327b4bc3a96aa06f67475ddc - src/nvidia/src/kernel/rmapi/param_copy.c
1c9b26108c6b7f27c5f4fe84e10d83cfb32c9b5b - src/nvidia/src/kernel/rmapi/resource_list.h
3b9809740d88ab4b5b9c9d1adbd3ec304f6f6c7e - src/nvidia/src/kernel/rmapi/resource.c
41c397e2cc8c8b1c9c734c435d2d4c17cf709e63 - src/nvidia/src/kernel/rmapi/mapping_cpu.c
58ed3486109a54829f1afdf214c15529eaed678b - src/nvidia/src/kernel/rmapi/mapping.c
0172aa3770ca55bbfbd5e66f48f4e4820a4d5576 - src/nvidia/src/kernel/rmapi/event.c
e26021985ccfa2fb94c96310d9700df405817889 - src/nvidia/src/kernel/rmapi/control.c
6ee3cc915f68b5b70274eec219b7fd6799479459 - src/nvidia/src/kernel/rmapi/rmapi_cache.c
7a4abc27bdbcbb758545783f4182f200587ae3bd - src/nvidia/src/kernel/rmapi/binary_api.c
f821719c449e0300a3c27ebeaa3f4d6791ddaf60 - src/nvidia/src/kernel/rmapi/alloc_free.c
b7561ece996380512992736f947ddea0ba7f075e - src/nvidia/src/kernel/rmapi/resource_desc.h
72a6ae5bcae8eb4197047aaa5c1780b689544c87 - src/nvidia/src/kernel/rmapi/entry_points.h
4fbbb955e617d7b014e201a5263915939c87f884 - src/nvidia/src/kernel/rmapi/resource_list_required_includes.h
a16bffcad38862470b4424fa9a1b0d4013304600 - src/nvidia/src/kernel/core/hal_mgr.c
4d3f32dbc4cbe3d4d1301079eaf21005f74dea90 - src/nvidia/src/kernel/core/locks_common.c
e7195ca43692b6fbf6a3533437650c596cee88db - src/nvidia/src/kernel/core/locks_minimal.c
ee0bf4f81d33e9a7b6bbb2be27bb3973c8cb5b18 - src/nvidia/src/kernel/core/system.c
905a0f08067503374c757ed34d1ea87379ab4a71 - src/nvidia/src/kernel/core/thread_state.c
afa03f17393b28b9fc791bf09c4d35833447808d - src/nvidia/src/kernel/core/hal/hal.c
d3922085d63a7edf02b582fe0b6e3acba6124c25 - src/nvidia/src/kernel/core/hal/hals_all.c
8eac3ea49f9a53063f7106211e5236372d87bdaf - src/nvidia/src/kernel/core/hal/info_block.c
1f258d22d361a8902c27a4329e553a73b3fbe6e9 - src/nvidia/src/kernel/gpu/device.c
f520afc43afd9e40f779d2bdf3acc48ff7419625 - src/nvidia/src/kernel/gpu/eng_state.c
7ed54a614b756e32a61366d2009db26d1ef5fcc4 - src/nvidia/src/kernel/gpu/gpu_arch.c
1b2a50c873087a28cc4edd4a65945bcafc84bcf0 - src/nvidia/src/kernel/gpu/gpu_uuid.c
5bbac8b7323fe7f048e54b2ebc3ebe4f30655181 - src/nvidia/src/kernel/gpu/gpu.c
c7f5b73c217a181f5ff28886bf691ec7d528cb86 - src/nvidia/src/kernel/gpu/gpu_resource.c
2408846a2a5c24a102df13919f384c6675f56f29 - src/nvidia/src/kernel/gpu/device_ctrl.c
2b40a86a112c7643a69b094194c2ee1dd294f16a - src/nvidia/src/kernel/gpu/gpu_gspclient.c
261a5b014b3869c3ce5e830cf8b9529fa0b8a09d - src/nvidia/src/kernel/gpu/gpu_resource_desc.c
4e1be780ac696a61f056933e5550040a2d42c6bd - src/nvidia/src/kernel/gpu/gpu_device_mapping.c
57941830e179d534a7329608658c82fd91ff4a57 - src/nvidia/src/kernel/gpu/gpu_timeout.c
89a6229720a7d5276d73ad51a210ce6f60cedb08 - src/nvidia/src/kernel/gpu/gpu_user_shared_data.c
bc508781e640dbf756d9c9e43e75227d05b413c7 - src/nvidia/src/kernel/gpu/device_share.c
84c2c6a59313d36aa70c8a01cfedf1d1e7a3d931 - src/nvidia/src/kernel/gpu/gpu_access.c
d0d744c416a52404a52c35ede015629990934003 - src/nvidia/src/kernel/gpu/gpu_engine_type.c
12c1f9494317c34b1b9bfcc58bf7bee81b08c98e - src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c
ea626b20043182e3b374cb05d02c75b482fcd3a3 - src/nvidia/src/kernel/gpu/gpu_rmapi.c
099da8d641fb4481f9a4c625588dd4aa4ce20bcd - src/nvidia/src/kernel/gpu/subdevice/subdevice.c
6fab19f1f68bdb8d2b969efc6f030e2066bc6b5e - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c
b4e503b320119fecdb22dfda1268ce31e1a7ecd7 - src/nvidia/src/kernel/gpu/subdevice/generic_engine.c
9afe5cedd5e7d535ee56f4f5b3cc549f154d8be2 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c
796d1368584a9318a39ed313dcb86bbcca40ad83 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c
4c363a34fe12b9bb0d428c3d90974d7085d0366f - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c
fcf79cf10019193a9e57f8d19b5a37bac6120365 - src/nvidia/src/kernel/gpu/arch/t25x/kern_gpu_t256d.c
095d4a87b067038bd2d80a1c4b2d9407810b0e66 - src/nvidia/src/kernel/gpu/arch/t26x/kern_gpu_t264d.c
c20ed8bd9fda88b036c6ff677b7c25ebd171434f - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c
b09af17437a01e63e960414a4534074da240dc59 - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c
ceb516c8064e1df2d18897f98f5c8ea58e907973 - src/nvidia/src/kernel/gpu/disp/disp_capabilities.c
c67baeb5df33080d99f322786759fc3f5436301d - src/nvidia/src/kernel/gpu/disp/disp_channel.c
8fafebf746bfcde2c53435be386a8a0846973b0c - src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c
6437dd659a38c62cd81fb59f229bd94e59f37e71 - src/nvidia/src/kernel/gpu/disp/disp_sf_user.c
0fbfb9dd91147f04bea1060788efc1121078c159 - src/nvidia/src/kernel/gpu/disp/kern_disp.c
5aa67b54fcd16f648d7a72b9c2c4ff3fb6d3a5be - src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c
56027ec220553e1febe42f37fd70757cbb034dcb - src/nvidia/src/kernel/gpu/disp/disp_objs.c
b95080033ecc8736a0cdf9476cec7563c4a2af0f - src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c
caba45a10f43e7817f491e7856ef30dd49782f6e - src/nvidia/src/kernel/gpu/disp/head/kernel_head.c
f59763139d9993ae545ded8057706cc4d65afc0c - src/nvidia/src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c
eb00ffa5a892558d39db15f473e2c308acfd86d9 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0404.c
2b19caf7def14190c99dc4e41983b4a3e3334f22 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0401.c
6d99d644a8294d08b0fdebf183306bbdadf819e3 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c
57fec208154cd0d25838a688f6457598baf2de7a - src/nvidia/src/kernel/gpu/disp/arch/v02/kern_disp_0204.c
64aa574198449e9556328d1c08f08b3bde5bfad0 - src/nvidia/src/kernel/gpu/disp/arch/v05/kern_disp_0501.c
d911e6ae9f7b96e6f441208d38701a8d833e7455 - src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c
ae5ef73d6e74026e0b847977c41b92cbf0f30a62 - src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c
4cfab589176c432463859f148ad32c7dac2c83d3 - src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c
60e8d1fa9cd375be783c4575baa2e99ac2b22a88 - src/nvidia/src/kernel/gpu/timer/timer.c
f6e518524581b772f8fdbc80418a2018570940ca - src/nvidia/src/kernel/gpu/timer/timer_ostimer.c
1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - src/nvidia/src/kernel/gpu/audio/hda_codec_api.c
10a8bfd47ce609763c07a0d61be2f71f9f91889e - src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c
bfc82499a8b9b8ce10411f6c391b0e575dc7c0d6 - src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c
a62f423d6cf69e96b0523a233ec00353d63ee8bd - src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c
92611eb4f3bed31064a9efbb54a1ece7ffcfc2af - src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c
4a95b73f744807d96510b0ad7181eae5b12839ce - src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c
ce09583697a98a2d0e8466dd45764f15945f55c2 - src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c
cebb9eee63e23bb934881b3313e422b50fb38abb - src/nvidia/src/kernel/gpu/dce_client/dce_client.c
d5d8ff429d3bda7103bafcb2dca94678efc8ddd8 - src/nvidia/src/kernel/gpu_mgr/gpu_group.c
2b49d8a3413a1731bc4fb0bab3f32ff272a71a8c - src/nvidia/src/kernel/gpu_mgr/gpu_db.c
37d1e3dd86e6409b8e461f90386e013194c9e4d1 - src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c
fe618e428d9a172a0fd9412f5a20df64d7270418 - src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c
593bbc5b93b620019144fadf1281a180ec050012 - src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c
54c1d1a44474a7027c5290551e60f13678226301 - src/nvidia/src/kernel/mem_mgr/standard_mem.c
44069d6ebbd94a11267e6cc0179ab167f91faec4 - src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c
5a5e689cf264134ae8c4300d986c209c04167743 - src/nvidia/src/kernel/mem_mgr/vaspace.c
5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h
630200d06b6588d7fa8c5b1ea16146e8281163d7 - src/nvidia/src/kernel/mem_mgr/io_vaspace.c
04876ed2dedf0ac3228ec6261a0f3f79609e44a5 - src/nvidia/src/kernel/mem_mgr/system_mem.c
873de51b330501a86ec7656fcf3f615034c49f8e - src/nvidia/src/kernel/mem_mgr/os_desc_mem.c
ed8376f04af08af8da7d47c6340ff38a8910de87 - src/nvidia/src/kernel/mem_mgr/mem.c
08762b3172f6309f1aeab895761193fa19cb176f - src/nvidia/interface/nv_sriov_defines.h
024b112ea410ee1b1badb585b03fdbabb64ade34 - src/nvidia/interface/nvrm_registry.h
3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - src/nvidia/interface/nv-firmware-registry.h
d02ee5bb3f19dffd8b5c30dc852cea243bcdf399 - src/nvidia/interface/acpidsmguids.h
60c7cafce7bd5240e8409e3c5b71214262347efc - src/nvidia/interface/acpigenfuncs.h
bff92c9767308a13df1d0858d5f9c82af155679a - src/nvidia/interface/nvacpitypes.h
7790849d0d261e84d04ab5a481bb57309de6409a - src/nvidia/interface/deprecated/rmapi_deprecated_utils.c
82f65de514ef7e2204cfb618d398cf3af8c12778 - src/nvidia/interface/deprecated/rmapi_deprecated.h
49e299b7257e179b701747e061b6b0214d5565f0 - src/nvidia/interface/rmapi/src/g_finn_rm_api.c
7b8431767b7c4b3861582ddab27a079568bf0660 - src/nvidia-modeset/Makefile
7e1249c1d187aec5891eabe5bacae2189d33dc55 - src/nvidia-modeset/lib/nvkms-sync.c
c3ab6005d7083e90145cac66addf815c4f93d9a0 - src/nvidia-modeset/lib/nvkms-format.c
f69ac0ec080036b8abc7f1ae7b857989f5c9df4a - src/nvidia-modeset/include/nvkms-headsurface-3d.h
b8854261256a801af52d1201081afa9c17486a96 - src/nvidia-modeset/include/nvkms-3dvision.h
3212e81bcde5a5dcec5dbba4155a41ca52dd2304 - src/nvidia-modeset/include/nvkms-prealloc.h
24aaf3a4cb16be7a5aaa8317090142743e3dd797 - src/nvidia-modeset/include/nvkms-flip-workarea.h
be6cff078fcf66221762a4af1515e01d294dd2f6 - src/nvidia-modeset/include/nvkms-push.h
4361f10ff446c401c3f52bf36aed52ca24706d49 - src/nvidia-modeset/include/nvkms-vrr.h
08aa0dd2f18a8cf74539ea8b25ef3f3646567a0c - src/nvidia-modeset/include/nvkms-evo1.h
9bfb2d12ecdaecaba7eaaffa3040ab142d37f892 - src/nvidia-modeset/include/nvkms-prealloc-types.h
0bd9cf097cfa373f0bed7be8fe5299e2ea4bf669 - src/nvidia-modeset/include/g_nvkms-evo-states.h
708e037052ea0b3d6309fa44a205282b7a69a331 - src/nvidia-modeset/include/nvkms-difr.h
412d8028a548e67e9ef85cb7d3f88385e70c56f9 - src/nvidia-modeset/include/nvkms-console-restore.h
52b6d1a1a6793d232571e6366709436b018ae3b7 - src/nvidia-modeset/include/nvkms-dpy.h
81fcc817dfb8ae1f98b63d2c1acacc303fedb554 - src/nvidia-modeset/include/nvkms-dpy-override.h
0f251b41b076bb80eeebf7d54e6fd6c764404c28 - src/nvidia-modeset/include/nvkms-evo-states.h
70d9251f331bbf28f5c5bbdf939ebad94db9362d - src/nvidia-modeset/include/nvkms-softfloat.h
6e3681d5caa36312804c91630eaaf510eda897d2 - src/nvidia-modeset/include/nvkms-dma.h
eb5248c4b0b51e7aecd2de87e496253b3b235c70 - src/nvidia-modeset/include/nvkms-utils-flip.h
377dd4a29b2ea5937a9b8fc3fba0c9e4ef92992e - src/nvidia-modeset/include/nvkms-cursor.h
e1225d674a0e6e58110750868c45a4655110a4d8 - src/nvidia-modeset/include/nvkms-headsurface-swapgroup.h
9e3d50761d3a27c1db3085ff82b7d194ff47bf34 - src/nvidia-modeset/include/nvkms-rm.h
fd9fa6da0fc28b00be524b0bed25a68c56278363 - src/nvidia-modeset/include/nvkms-modeset.h
be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - src/nvidia-modeset/include/nvkms-attributes.h
e30d9c286263051d14a1862f0c630295a78abde7 - src/nvidia-modeset/include/nvkms-headsurface-priv.h
3fd0822b8b44d13685ecde9d02300e6cfbb123db - src/nvidia-modeset/include/nvkms-hdmi.h
6b21a68e254becdd2641bc456f194f54c23abe51 - src/nvidia-modeset/include/nvkms-framelock.h
53122264a19ea00ef26e6accde3a3a7570e46b15 - src/nvidia-modeset/include/nvkms-vblank-sem-control.h
1b21352fd9d0b1c5708cb8512acf20ba2e13955d - src/nvidia-modeset/include/nvkms-headsurface.h
59d20eff40e4e488eb3ab7c97b5e171142dcdbcf - src/nvidia-modeset/include/nvkms-modeset-workarea.h
933f9b359a1c3807771e2719c6dd80d71beff3c8 - src/nvidia-modeset/include/nvkms-utils.h
f5f3b11c78a8b0eef40c09e1751615a47f516edb - src/nvidia-modeset/include/nvkms-hal.h
03f3fd4c2fb7db83441805a5c350b121bd3117b4 - src/nvidia-modeset/include/nvkms-setlut-workarea.h
31acf6af2a4c82e3429efa77d110cb346c11905f - src/nvidia-modeset/include/nvkms-lut.h
e4bae9a0df729119071902f7ad59704c97adee0e - src/nvidia-modeset/include/nvkms-private.h
fbe2cbfd32b40d8188c6b25716fb360720ab5760 - src/nvidia-modeset/include/nvkms-evo.h
04f2e01c7f798a615319accc2dd713f617a81172 - src/nvidia-modeset/include/nvkms-headsurface-config.h
4a94381bd8c24b09193577d3f05d6d61f178e1cf - src/nvidia-modeset/include/nvkms-ctxdma.h
b4d53599736b03ee1bc149abe7b602336f40295c - src/nvidia-modeset/include/nvkms-flip.h
46fc0e138ba7be5fa3ea0ada3ee0a78656950c80 - src/nvidia-modeset/include/nvkms-modeset-types.h
260b6ef87c755e55a803adad4ce49f2d57315f9a - src/nvidia-modeset/include/nvkms-event.h
35fa1444c57f7adbbddddc612237f3ad38cdd78f - src/nvidia-modeset/include/nvkms-rmapi.h
8782df838ea3d2617e9842c89389f51137b19a73 - src/nvidia-modeset/include/nvkms-headsurface-matrix.h
881d7e4187ff9c7e9d02672aedafc1605f3055ec - src/nvidia-modeset/include/nvkms-modepool.h
60c01e29aa91aa80bf3750a1b11fe61a6cdfde58 - src/nvidia-modeset/include/nvkms-types.h
cc3dc4021b76782434efd2aa81d3ffdd1f3b1f0a - src/nvidia-modeset/include/nvkms-headsurface-ioctl.h
3dc2113c55970fa70b7afb4fd30f2f1e777ebc12 - src/nvidia-modeset/include/nvkms-surface.h
aa43ad7f970331c56378b7797f66b0a77d8e99dd - src/nvidia-modeset/include/nvkms-evo3.h
8c7e0e15c1038fe518e98d8f86fafb250b10a1d2 - src/nvidia-modeset/include/nvkms-stereo.h
9deeeae9081fd828a14f3b0df5fbf17a81161786 - src/nvidia-modeset/include/nvkms-hw-flip.h
6460f8427fdb375d659975c7f6eaadaca0ed2b2c - src/nvidia-modeset/include/dp/nvdp-device.h
1912d523f567c4fc36075942cf8acaf5d5478232 - src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h
a233bdcd5daa0582acf2cd5b0f339ad54d09bf13 - src/nvidia-modeset/include/dp/nvdp-timer.h
2b91423ff88ca398324088d4f910e81f6944123a - src/nvidia-modeset/include/dp/nvdp-connector.h
aa8aa13c6fc48ff5ef621f243e94dcc01a46dea3 - src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h
c0de6efe1d5c57da324118f108ea0570a6923036 - src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h
b01351ece15ce0d54a19ad0d7ffa056963d72488 - src/nvidia-modeset/kapi/src/nvkms-kapi.c
a4d52bb238ce94f3427f25bd169e58d5d5f4abd1 - src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c
ce42ceac4c4cf9d249d66ab57ae2f435cd9623fc - src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c
80c2c9a2a05beb0202239db8b0dd7080ff21c194 - src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h
4c856c1324060dcb5a9e72e5e82c7a60f6324733 - src/nvidia-modeset/kapi/interface/nvkms-kapi.h
11af2aeea97398b58f628fe4685b5dfcfda5791b - src/nvidia-modeset/src/nvkms-modeset.c
016fd1b111731c6d323425d52bfe1a04d8bcade7 - src/nvidia-modeset/src/nvkms-headsurface-swapgroup.c
37a6d00e8721a9c4134810f8be3e7168f8cbb226 - src/nvidia-modeset/src/nvkms-evo.c
4758c601621603597bd2387c4f08b3fdc17e375d - src/nvidia-modeset/src/nvkms-hw-flip.c
5e3188c2d9b580ff69e45842f841f5c92c0c6edb - src/nvidia-modeset/src/nvkms-headsurface-ioctl.c
e1a3c31638416a0132c5301fe5dd4b1c93f14376 - src/nvidia-modeset/src/nvkms-cursor3.c
d48ff2da5fac6f8cd0522a25b947b5b8c01812ba - src/nvidia-modeset/src/nvkms-rm.c
30ad7839985dea46e6b6d43499210a3056da51ad - src/nvidia-modeset/src/nvkms-utils-flip.c
2c24667a18374ae967917df219f3775d9a79ae04 - src/nvidia-modeset/src/nvkms-headsurface-3d.c
fb8b4aa1e36f23e1927be3dbd351ab0357aeb735 - src/nvidia-modeset/src/nvkms-evo3.c
9ce404d122bbdcd5f626f2c2b7ff08a9bfcf4045 - src/nvidia-modeset/src/nvkms-flip.c
e5c96eb6b9884daf4a8d0d467b009008a45065b9 - src/nvidia-modeset/src/g_nvkms-evo-states.c
094c2169412cb577a6e9db9420da084264119284 - src/nvidia-modeset/src/nvkms-hal.c
1e0bf57319954911ddd2fe87b0cd05e257f1439e - src/nvidia-modeset/src/nvkms-surface.c
bd2e4a6102432d4ac1faf92b5d3db29e9e3cfafc - src/nvidia-modeset/src/nvkms-utils.c
6d41c9f84cc9ce2d16812e94a3fba055b3fc7308 - src/nvidia-modeset/src/nvkms-conf.c
05bfe67d8cb956a666804b8f27e507bbd35e2c2d - src/nvidia-modeset/src/nvkms-difr.c
9a8746ee4a4e772b8ac13f06dc0de8a250fdb4c7 - src/nvidia-modeset/src/nvkms-ctxdma.c
382141f251ce64e2d33add3b89225c373da9ea7d - src/nvidia-modeset/src/nvkms-hdmi.c
2e1644a912e7a27ec04288e000c3fa5439eecb60 - src/nvidia-modeset/src/nvkms-headsurface-matrix.c
127a3f77febf09d56b6fe3534bc62ff0ffa535d8 - src/nvidia-modeset/src/nvkms-dpy.c
e0756f45732035b1000a03bd8a995a46041904ae - src/nvidia-modeset/src/nvkms-vblank-sem-control.c
e4044bb85de59d662d0d579771c076cbe9b10bbb - src/nvidia-modeset/src/nvkms.c
12cbc57714f458b5673115bb5c4d380509d05277 - src/nvidia-modeset/src/nvkms-cursor.c
5c93bc35d8f93330dd7a1f7808e39c6001ee83e8 - src/nvidia-modeset/src/nvkms-headsurface-config.c
ed78249de63139ec2629bde58b616cef649281f1 - src/nvidia-modeset/src/nvkms-evo2.c
c51c4f2e3ac11bf86d4549ce5e9d9010199e37dd - src/nvidia-modeset/src/nvkms-prealloc.c
9d38d5147d06a293a272087d78d0b96b6003f11e - src/nvidia-modeset/src/nvkms-attributes.c
65b02b48caff2a9100b8c5614f91d42fb20da9c0 - src/nvidia-modeset/src/nvkms-dpy-override.c
a62b617aa5c89056c19a5f3c91402df8cfcc1103 - src/nvidia-modeset/src/nvkms-push.c
9fea40b7b55d6ebf3f73b5d469751c873ffbe7c0 - src/nvidia-modeset/src/nvkms-dma.c
da726d20eea99a96af4c10aace88f419e8ee2a34 - src/nvidia-modeset/src/nvkms-event.c
a1c7c3c1191762c0a1038674dee0075d532ccd2d - src/nvidia-modeset/src/nvkms-headsurface.c
2fabe1c14116a2b07f24d01710394ee84a6e3914 - src/nvidia-modeset/src/nvkms-3dvision.c
89b58b1e67ff7ed43c889fe7d85329d7f4762b91 - src/nvidia-modeset/src/nvkms-hw-states.c
c799d52bdc792efc377fb5cd307b0eb445c44d6a - src/nvidia-modeset/src/nvkms-cursor2.c
dd6c86b5557b02dd15a8ea0f10bde9770d90874e - src/nvidia-modeset/src/nvkms-evo4.c
be49ea18102a44914e0d7686c51430df18336383 - src/nvidia-modeset/src/nvkms-framelock.c
6bdb90474b5d31c53104f7b29b447b3f798aaa0e - src/nvidia-modeset/src/nvkms-vrr.c
05ca4acdfeb9b99eccc7e222846fc688473322ae - src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
f754a27436fd1e1fa103de6110224c21ad7ea9f4 - src/nvidia-modeset/src/nvkms-pow.c
e8c6d2eedfba19f8f06dd57f629588615cf1a2e9 - src/nvidia-modeset/src/nvkms-evo1.c
d15f314bea66574e0ffc72966b86bae8366412f5 - src/nvidia-modeset/src/nvkms-console-restore.c
0699860902369359e5ff1a0ef46b87e955d4bb7a - src/nvidia-modeset/src/nvkms-modepool.c
403e6dbff0a607c2aecf3204c56633bd7b612ae2 - src/nvidia-modeset/src/nvkms-stereo.c
fd6ecacc4f273c88960148c070dd17d93f49909b - src/nvidia-modeset/src/nvkms-lut.c
771fee54d1123871e380db6f3227b4946b6be647 - src/nvidia-modeset/src/dp/nvdp-timer.cpp
6b985fc50b5040ce1a81418bed73a60edb5d3289 - src/nvidia-modeset/src/dp/nvdp-timer.hpp
dcf9f99e79a13b109a8665597f0fc7c00ec37957 - src/nvidia-modeset/src/dp/nvdp-connector.cpp
e0e50fc1c526ecf0fe2f60689a25adda1257e2b3 - src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp
16081091156a813977dfdd0718d55ea4a66a0686 - src/nvidia-modeset/src/dp/nvdp-device.cpp
6e17f81da1b94414c1cbf18c3ea92f25352d8bf5 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp
81065db63fda6468fdf56d853781fca8af610798 - src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp
e1f003a64cec57f299e65567d29e69951a62f44a - src/nvidia-modeset/src/dp/nvdp-host.cpp
ca07b8e8f507de47694ac7b3b1719b0931da02c6 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp
2b49249a135293d01e82ef11ee520596c9825875 - src/nvidia-modeset/src/shaders/g_pascal_shaders
09cb78322cc8465d42a4be6a1c3682566c66462d - src/nvidia-modeset/src/shaders/g_maxwell_shaders
a62c80e00077041d38d84e06c5834dca527e8a55 - src/nvidia-modeset/src/shaders/g_volta_shader_info.h
21cf709a8717d43c4abc6b66c8faad141592b7ce - src/nvidia-modeset/src/shaders/g_nvidia-headsurface-shader-info.h
fec9074463a5505e300f9feb77b60ec77b781bb7 - src/nvidia-modeset/src/shaders/g_turing_shader_info.h
cad54ab33c1132ba7453f54e9a02d34504e4fd5c - src/nvidia-modeset/src/shaders/g_pascal_shader_info.h
f3bdeb7d46fdc9c31940ea799ce4a0d328fe1844 - src/nvidia-modeset/src/shaders/g_ampere_shaders
0ba4739302e0938b5599afb7e7ad281b21e25cec - src/nvidia-modeset/src/shaders/g_maxwell_shader_info.h
1c02043d31faf4f79c4a54dd5a622e87ee276be8 - src/nvidia-modeset/src/shaders/g_volta_shaders
f540d144503d00941a1b32fb1a3d13061065b24e - src/nvidia-modeset/src/shaders/g_hopper_shader_info.h
74824b796722071bc3d90e4dacfed245dcda28cd - src/nvidia-modeset/src/shaders/g_turing_shaders
ce728856b76bfa428b199fd3b97e0cbc24ef54cd - src/nvidia-modeset/src/shaders/g_hopper_shaders
02bb8bc0f5d228d4a9a383d797daffd8936c4ad7 - src/nvidia-modeset/src/shaders/g_ampere_shader_info.h
9f35175e44247d4facb26a60614d40fcdb74416f - src/nvidia-modeset/src/shaders/g_shader_names.h
ca86fee8bd52e6c84e376199c5f3890078bc2031 - src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h
b2a5ddfd8dcb3000b9d102bd55b5b560730e81d5 - src/nvidia-modeset/os-interface/include/nvkms.h
51b367a6e289cc8957388745988315024f97506e - src/nvidia-modeset/interface/nvkms-api.h
b986bc6591ba17a74ad81ec4c93347564c6d5165 - src/nvidia-modeset/interface/nvkms-format.h
2ea1436104463c5e3d177e8574c3b4298976d37e - src/nvidia-modeset/interface/nvkms-ioctl.h
3bf4a2d1fec120ef5313c8bf119bc22fb3cf0cc5 - src/nvidia-modeset/interface/nvkms-modetimings.h
c54c62de441828282db9a4f5b35c2fa5c97d94f1 - src/nvidia-modeset/interface/nvkms-api-types.h
8e3e74d2b3f45381e7b0012d930cf451cbd1728f - src/nvidia-modeset/interface/nvkms-sync.h

Change-Id: If5ef3d3202eab829a730f4711eb572cfbfea8273
2025-08-25 10:24:25 -07:00
1022 changed files with 253432 additions and 62057 deletions


@@ -1,156 +0,0 @@
# Changelog
## Release 545 Entries
#### Fixed
- Fix always-false conditional, [#493](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/493) by @meme8383
## Release 535 Entries
### [535.43.02] 2023-05-30
#### Fixed
- Fixed console restore with traditional VGA consoles.
#### Added
- Added support for Run Time D3 (RTD3) on Ampere and later GPUs.
- Added support for G-Sync on desktop GPUs.
## Release 530 Entries
### [530.41.03] 2023-03-23
### [530.30.02] 2023-02-28
#### Changed
- GSP firmware is now distributed as `gsp_tu10x.bin` and `gsp_ga10x.bin` to better reflect the GPU architectures supported by each firmware file in this release.
- The .run installer will continue to install firmware to `/lib/firmware/nvidia/<version>` and the `nvidia.ko` kernel module will load the appropriate firmware for each GPU at runtime.
#### Fixed
- Add support for resizable BAR on Linux when NVreg_EnableResizableBar=1 module param is set. [#3](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/3) by @sjkelly
#### Added
- Support for power management features like Suspend, Hibernate and Resume.
## Release 525 Entries
### [525.116.04] 2023-05-09
### [525.116.03] 2023-04-25
### [525.105.17] 2023-03-30
### [525.89.02] 2023-02-08
### [525.85.12] 2023-01-30
### [525.85.05] 2023-01-19
#### Fixed
- Fix build problems with Clang 15.0, [#377](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/377) by @ptr1337
### [525.78.01] 2023-01-05
### [525.60.13] 2022-12-05
### [525.60.11] 2022-11-28
#### Fixed
- Fixed nvenc compatibility with usermode clients [#104](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/104)
### [525.53] 2022-11-10
#### Changed
- GSP firmware is now distributed as multiple firmware files: this release has `gsp_tu10x.bin` and `gsp_ad10x.bin` replacing `gsp.bin` from previous releases.
- Each file is named after a GPU architecture and supports GPUs from one or more architectures. This allows GSP firmware to better leverage each architecture's capabilities.
- The .run installer will continue to install firmware to `/lib/firmware/nvidia/<version>` and the `nvidia.ko` kernel module will load the appropriate firmware for each GPU at runtime.
#### Fixed
- Add support for IBT (indirect branch tracking) on supported platforms, [#256](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/256) by @rnd-ash
- Return EINVAL when failing to allocate memory, [#280](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/280) by @YusufKhan-gamedev
- Fix various typos in nvidia/src/kernel, [#16](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/16) by @alexisgeoffrey
- Added support for rotation in X11, Quadro Sync, Stereo, and YUV 4:2:0 on Turing.
## Release 520 Entries
### [520.61.07] 2022-10-20
### [520.56.06] 2022-10-12
#### Added
- Introduce support for GeForce RTX 4090 GPUs.
### [520.61.05] 2022-10-10
#### Added
- Introduce support for NVIDIA H100 GPUs.
#### Fixed
- Fix/Improve Makefile, [#308](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/308/) by @izenynn
- Make nvLogBase2 more efficient, [#177](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/177/) by @DMaroo
- nv-pci: fixed always true expression, [#195](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/195/) by @ValZapod
## Release 515 Entries
### [515.76] 2022-09-20
#### Fixed
- Improved compatibility with new Linux kernel releases
- Fixed possible excessive GPU power draw on an idle X11 or Wayland desktop when driving high resolutions or refresh rates
### [515.65.07] 2022-10-19
### [515.65.01] 2022-08-02
#### Fixed
- Collection of minor fixes to issues, [#61](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/61) by @Joshua-Ashton
- Remove unnecessary use of acpi_bus_get_device().
### [515.57] 2022-06-28
#### Fixed
- Backtick is deprecated, [#273](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/273) by @arch-user-france1
### [515.48.07] 2022-05-31
#### Added
- List of compatible GPUs in README.md.
#### Fixed
- Fix various README capitalizations, [#8](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/8) by @27lx
- Automatically tag bug report issues, [#15](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/15) by @thebeanogamer
- Improve conftest.sh Script, [#37](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/37) by @Nitepone
- Update HTTP link to HTTPS, [#101](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/101) by @alcaparra
- moved array sanity check to before the array access, [#117](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/117) by @RealAstolfo
- Fixed some typos, [#122](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/122) by @FEDOyt
- Fixed capitalization, [#123](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/123) by @keroeslux
- Fix typos in NVDEC Engine Descriptor, [#126](https://github.com/NVIDIA/open-gpu-kernel-modules/pull/126) from @TrickyDmitriy
- Extranous apostrohpes in a makefile script [sic], [#14](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/14) by @kiroma
- HDMI no audio @ 4K above 60Hz, [#75](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/75) by @adolfotregosa
- dp_configcaps.cpp:405: array index sanity check in wrong place?, [#110](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/110) by @dcb314
- NVRM kgspInitRm_IMPL: missing NVDEC0 engine, cannot initialize GSP-RM, [#116](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/116) by @kfazz
- ERROR: modpost: "backlight_device_register" [...nvidia-modeset.ko] undefined, [#135](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/135) by @sndirsch
- aarch64 build fails, [#151](https://github.com/NVIDIA/open-gpu-kernel-modules/issues/151) by @frezbo
### [515.43.04] 2022-05-11
- Initial release.

README.md

File diff suppressed because it is too large


File diff suppressed because it is too large


@@ -8,7 +8,7 @@
# NV_KERNEL_SOURCES : The root of the kernel source tree.
# NV_KERNEL_OUTPUT : The kernel's output tree.
# NV_KERNEL_MODULES : A whitespace-separated list of modules to build.
# ARCH : The target CPU architecture: x86_64|arm64|powerpc
# ARCH : The target CPU architecture: x86_64|arm64
#
# Kbuild provides the variables:
#
@@ -57,71 +57,106 @@ ifeq ($(NV_UNDEF_BEHAVIOR_SANITIZER),1)
UBSAN_SANITIZE := y
endif
#
# Command to create a symbolic link, explicitly resolving the symlink target
# to an absolute path to abstract away the difference between Linux < 6.13,
# where the CWD is the Linux kernel source tree for Kbuild extmod builds, and
# Linux >= 6.13, where the CWD is the external module source tree.
#
# This is used to create the nv*-kernel.o -> nv*-kernel.o_binary symlinks for
# kernel modules which use precompiled binary object files.
#
quiet_cmd_symlink = SYMLINK $@
cmd_symlink = ln -sf $(abspath $<) $@
$(foreach _module, $(NV_KERNEL_MODULES), \
$(eval include $(src)/$(_module)/$(_module).Kbuild))
#
# Define CFLAGS that apply to all the NVIDIA kernel modules. EXTRA_CFLAGS
# is deprecated since 2.6.24 in favor of ccflags-y, but we need to support
# older kernels which do not have ccflags-y. Newer kernels append
# $(EXTRA_CFLAGS) to ccflags-y for compatibility.
#
ccflags-y += -I$(src)/common/inc
ccflags-y += -I$(src)
ccflags-y += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-format-extra-args
ccflags-y += -D__KERNEL__ -DMODULE -DNVRM
ccflags-y += -DNV_VERSION_STRING=\"580.00\"
EXTRA_CFLAGS += -I$(src)/common/inc
EXTRA_CFLAGS += -I$(src)
EXTRA_CFLAGS += -Wall $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args
EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM
EXTRA_CFLAGS += -DNV_VERSION_STRING=\"540.3.2\"
# Include and link Tegra out-of-tree modules.
ifneq ($(wildcard /usr/src/nvidia/nvidia-oot),)
SYSSRCNVOOT ?= /usr/src/nvidia/nvidia-oot
endif
ifneq ($(SYSSRCHOST1X),)
EXTRA_CFLAGS += -I$(SYSSRCHOST1X)
ccflags-y += -I$(SYSSRCHOST1X)
endif
EXTRA_CFLAGS += -Wno-unused-function
ifneq ($(SYSSRCNVOOT),)
ccflags-y += -I$(SYSSRCNVOOT)/include
KBUILD_EXTRA_SYMBOLS = $(SYSSRCNVOOT)/Module.symvers
endif
# Some Android kernels prohibit driver use of filesystem functions like
# filp_open() and kernel_read(). Disable the NV_FILESYSTEM_ACCESS_AVAILABLE
# functionality that uses those functions when building for Android.
PLATFORM_IS_ANDROID ?= 0
ifeq ($(PLATFORM_IS_ANDROID),1)
ccflags-y += -DNV_FILESYSTEM_ACCESS_AVAILABLE=0
else
ccflags-y += -DNV_FILESYSTEM_ACCESS_AVAILABLE=1
endif
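For illustration, a minimal hedged sketch of how kernel-interface code can guard filesystem access behind this define; the helper name is hypothetical, while filp_open() and kernel_read() are the standard kernel APIs named in the comment above:

```c
/* Hedged sketch: nv_read_file_example() is an illustrative name, not a
 * symbol from this driver. It only touches the filesystem when
 * NV_FILESYSTEM_ACCESS_AVAILABLE is set to 1 by the Kbuild logic above. */
#include <linux/err.h>
#include <linux/fs.h>

static ssize_t nv_read_file_example(const char *path, void *buf, size_t len)
{
#if NV_FILESYSTEM_ACCESS_AVAILABLE
    struct file *fp = filp_open(path, O_RDONLY, 0);
    loff_t pos = 0;
    ssize_t ret;

    if (IS_ERR(fp))
        return PTR_ERR(fp);

    ret = kernel_read(fp, buf, len, &pos);
    filp_close(fp, NULL);
    return ret;
#else
    return -EOPNOTSUPP; /* Android builds: filesystem helpers prohibited */
#endif
}
```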
ccflags-y += -Wno-unused-function
ifneq ($(NV_BUILD_TYPE),debug)
EXTRA_CFLAGS += -Wuninitialized
ccflags-y += -Wuninitialized
endif
EXTRA_CFLAGS += -fno-strict-aliasing
ccflags-y += -fno-strict-aliasing
ifeq ($(ARCH),arm64)
EXTRA_CFLAGS += -mstrict-align
ccflags-y += -mstrict-align
endif
ifeq ($(NV_BUILD_TYPE),debug)
EXTRA_CFLAGS += -g
ccflags-y += -g
endif
EXTRA_CFLAGS += -ffreestanding
ccflags-y += -ffreestanding
ifeq ($(ARCH),arm64)
EXTRA_CFLAGS += -mgeneral-regs-only -march=armv8-a
EXTRA_CFLAGS += $(call cc-option,-mno-outline-atomics,)
ccflags-y += -mgeneral-regs-only -march=armv8-a
ccflags-y += $(call cc-option,-mno-outline-atomics,)
endif
ifeq ($(ARCH),x86_64)
EXTRA_CFLAGS += -mno-red-zone -mcmodel=kernel
ccflags-y += -mno-red-zone -mcmodel=kernel
endif
ifeq ($(ARCH),powerpc)
EXTRA_CFLAGS += -mlittle-endian -mno-strict-align -mno-altivec
endif
EXTRA_CFLAGS +=
EXTRA_CFLAGS += $(call cc-option,-Werror=undef,)
EXTRA_CFLAGS += -DNV_SPECTRE_V2=$(NV_SPECTRE_V2)
EXTRA_CFLAGS += -DNV_KERNEL_INTERFACE_LAYER
ccflags-y +=
ccflags-y += $(call cc-option,-Werror=undef,)
ccflags-y += -DNV_SPECTRE_V2=$(NV_SPECTRE_V2)
ccflags-y += -DNV_KERNEL_INTERFACE_LAYER
#
# Detect SGI UV systems and apply system-specific optimizations.
#
ifneq ($(wildcard /proc/sgi_uv),)
EXTRA_CFLAGS += -DNV_CONFIG_X86_UV
ccflags-y += -DNV_CONFIG_X86_UV
endif
ifdef VGX_FORCE_VFIO_PCI_CORE
ccflags-y += -DNV_VGPU_FORCE_VFIO_PCI_CORE
endif
WARNINGS_AS_ERRORS ?=
ifeq ($(WARNINGS_AS_ERRORS),1)
ccflags-y += -Werror
else
ccflags-y += -Wno-error
endif
#
# The conftest.sh script tests various aspects of the target kernel.
@@ -147,7 +182,12 @@ NV_CONFTEST_CMD := /bin/sh $(NV_CONFTEST_SCRIPT) \
NV_CFLAGS_FROM_CONFTEST := $(shell $(NV_CONFTEST_CMD) build_cflags)
NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(EXTRA_CFLAGS) -fno-pie
NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(ccflags-y) -fno-pie
NV_CONFTEST_CFLAGS += $(filter -std=%,$(KBUILD_CFLAGS))
NV_CONFTEST_CFLAGS += $(call cc-disable-warning,pointer-sign)
NV_CONFTEST_CFLAGS += $(call cc-option,-fshort-wchar,)
NV_CONFTEST_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types,)
NV_CONFTEST_CFLAGS += -Wno-error
NV_CONFTEST_COMPILE_TEST_HEADERS := $(obj)/conftest/macros.h
NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/functions.h
@@ -207,106 +247,7 @@ $(obj)/conftest/patches.h: $(NV_CONFTEST_SCRIPT)
@mkdir -p $(obj)/conftest
@$(NV_CONFTEST_CMD) patch_check > $@
# Each of these headers is checked for presence with a test #include; a
# corresponding #define will be generated in conftest/headers.h.
NV_HEADER_PRESENCE_TESTS = \
asm/system.h \
drm/drmP.h \
drm/drm_aperture.h \
drm/drm_auth.h \
drm/drm_gem.h \
drm/drm_crtc.h \
drm/drm_color_mgmt.h \
drm/drm_atomic.h \
drm/drm_atomic_helper.h \
drm/drm_atomic_state_helper.h \
drm/drm_encoder.h \
drm/drm_atomic_uapi.h \
drm/drm_drv.h \
drm/drm_fbdev_generic.h \
drm/drm_framebuffer.h \
drm/drm_connector.h \
drm/drm_probe_helper.h \
drm/drm_blend.h \
drm/drm_fourcc.h \
drm/drm_prime.h \
drm/drm_plane.h \
drm/drm_vblank.h \
drm/drm_file.h \
drm/drm_ioctl.h \
drm/drm_device.h \
drm/drm_mode_config.h \
drm/drm_modeset_lock.h \
dt-bindings/interconnect/tegra_icc_id.h \
generated/autoconf.h \
generated/compile.h \
generated/utsrelease.h \
linux/efi.h \
linux/kconfig.h \
linux/platform/tegra/mc_utils.h \
linux/printk.h \
linux/ratelimit.h \
linux/prio_tree.h \
linux/log2.h \
linux/of.h \
linux/bug.h \
linux/sched.h \
linux/sched/mm.h \
linux/sched/signal.h \
linux/sched/task.h \
linux/sched/task_stack.h \
xen/ioemu.h \
linux/fence.h \
linux/dma-fence.h \
linux/dma-resv.h \
soc/tegra/chip-id.h \
soc/tegra/fuse.h \
soc/tegra/tegra_bpmp.h \
video/nv_internal.h \
linux/platform/tegra/dce/dce-client-ipc.h \
linux/nvhost.h \
linux/nvhost_t194.h \
linux/host1x-next.h \
asm/book3s/64/hash-64k.h \
asm/set_memory.h \
asm/prom.h \
asm/powernv.h \
linux/atomic.h \
asm/barrier.h \
asm/opal-api.h \
sound/hdaudio.h \
asm/pgtable_types.h \
asm/page.h \
linux/stringhash.h \
linux/dma-map-ops.h \
rdma/peer_mem.h \
sound/hda_codec.h \
linux/dma-buf.h \
linux/time.h \
linux/platform_device.h \
linux/mutex.h \
linux/reset.h \
linux/of_platform.h \
linux/of_device.h \
linux/of_gpio.h \
linux/of_clk.h \
linux/gpio.h \
linux/gpio/consumer.h \
linux/interconnect.h \
linux/pm_runtime.h \
linux/clk.h \
linux/clk-provider.h \
linux/ioasid.h \
linux/stdarg.h \
linux/iosys-map.h \
asm/coco.h \
linux/vfio_pci_core.h \
linux/mdev.h \
soc/tegra/bpmp-abi.h \
soc/tegra/bpmp.h \
linux/sync_file.h \
linux/cc_platform.h
include $(src)/header-presence-tests.mk
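Each header path above is turned into a presence define of the form NV_<PATH>_H_PRESENT; for example, linux/stringhash.h yields NV_LINUX_STRINGHASH_H_PRESENT, which driver code consumes as in this sketch (the same pattern appears in a header touched later in this diff):

```c
/* Sketch: consuming a conftest-generated header-presence define. */
#if defined(NV_LINUX_STRINGHASH_H_PRESENT)
#include <linux/stringhash.h>   /* full_name_hash() lives here on newer kernels */
#else
#include <linux/dcache.h>       /* older kernels declared it here instead */
#endif
```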
# Filename to store the define for the header in $(1); this is only consumed by
# the rule below that concatenates all of these together.


@@ -28,7 +28,7 @@ else
else
KERNEL_UNAME ?= $(shell uname -r)
KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME)
KERNEL_SOURCES := $(shell test -d $(KERNEL_MODLIB)/source && echo $(KERNEL_MODLIB)/source || echo $(KERNEL_MODLIB)/build)
KERNEL_SOURCES := $(shell ((test -d $(KERNEL_MODLIB)/source && echo $(KERNEL_MODLIB)/source) || (test -d $(KERNEL_MODLIB)/build/source && echo $(KERNEL_MODLIB)/build/source)) || echo $(KERNEL_MODLIB)/build)
endif
KERNEL_OUTPUT := $(KERNEL_SOURCES)
@@ -42,27 +42,83 @@ else
else
KERNEL_UNAME ?= $(shell uname -r)
KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME)
ifeq ($(KERNEL_SOURCES), $(KERNEL_MODLIB)/source)
# $(filter pattern...,text) - Returns all whitespace-separated words in text that
# do match any of the pattern words, removing any words that do not match.
# Set the KERNEL_OUTPUT only if either $(KERNEL_MODLIB)/source or
# $(KERNEL_MODLIB)/build/source path matches the KERNEL_SOURCES.
ifneq ($(filter $(KERNEL_SOURCES),$(KERNEL_MODLIB)/source $(KERNEL_MODLIB)/build/source),)
KERNEL_OUTPUT := $(KERNEL_MODLIB)/build
KBUILD_PARAMS := KBUILD_OUTPUT=$(KERNEL_OUTPUT)
endif
endif
# If CC hasn't been set explicitly, check the value of CONFIG_CC_VERSION_TEXT.
# Look for the compiler specified there, and use it by default, if found.
ifeq ($(origin CC),default)
cc_version_text=$(firstword $(shell . $(KERNEL_OUTPUT)/.config; \
echo "$$CONFIG_CC_VERSION_TEXT"))
ifneq ($(cc_version_text),)
ifeq ($(shell command -v $(cc_version_text)),)
$(warning WARNING: Unable to locate the compiler $(cc_version_text) \
from CONFIG_CC_VERSION_TEXT in the kernel configuration.)
else
CC=$(cc_version_text)
endif
endif
endif
CC ?= cc
LD ?= ld
OBJDUMP ?= objdump
AWK ?= awk
# Bake the following awk program into a string. The program is needed to add
# C++ to the languages excluded from BTF generation.
#
# Also, unconditionally return success (0) from the awk program, rather than
# propagating pahole's return status (with 'exit system(pahole_cmd)'), to
# work around a DW_TAG_rvalue_reference_type error in
# kernel/nvidia-modeset.ko.
#
# BEGIN {
# pahole_cmd = "pahole"
# for (i = 1; i < ARGC; i++) {
# if (ARGV[i] ~ /--lang_exclude=/) {
# pahole_cmd = pahole_cmd sprintf(" %s,c++", ARGV[i])
# } else {
# pahole_cmd = pahole_cmd sprintf(" %s", ARGV[i])
# }
# }
# system(pahole_cmd)
# }
PAHOLE_AWK_PROGRAM = BEGIN { pahole_cmd = \"pahole\"; for (i = 1; i < ARGC; i++) { if (ARGV[i] ~ /--lang_exclude=/) { pahole_cmd = pahole_cmd sprintf(\" %s,c++\", ARGV[i]); } else { pahole_cmd = pahole_cmd sprintf(\" %s\", ARGV[i]); } } system(pahole_cmd); }
# If scripts/pahole-flags.sh is not present in the kernel tree, add PAHOLE and
# PAHOLE_AWK_PROGRAM assignments to PAHOLE_VARIABLES; otherwise assign the
# empty string to PAHOLE_VARIABLES.
PAHOLE_VARIABLES=$(if $(wildcard $(KERNEL_SOURCES)/scripts/pahole-flags.sh),,"PAHOLE=$(AWK) '$(PAHOLE_AWK_PROGRAM)'")
ifndef ARCH
ARCH := $(shell uname -m | sed -e 's/i.86/i386/' \
-e 's/armv[0-7]\w\+/arm/' \
-e 's/aarch64/arm64/' \
-e 's/ppc64le/powerpc/' \
-e 's/riscv64/riscv/' \
)
endif
KERNEL_ARCH = $(ARCH)
ifneq ($(filter $(ARCH),i386 x86_64),)
KERNEL_ARCH = x86
else
ifeq ($(filter $(ARCH),arm64 riscv),)
$(error Unsupported architecture $(ARCH))
endif
endif
NV_KERNEL_MODULES ?= $(wildcard nvidia nvidia-modeset nvidia-drm)
NV_KERNEL_MODULES := $(filter-out $(NV_EXCLUDE_KERNEL_MODULES), \
$(NV_KERNEL_MODULES))
INSTALL_MOD_DIR ?= kernel/drivers/video
NV_VERBOSE ?=
SPECTRE_V2_RETPOLINE ?= 0
@@ -74,12 +130,13 @@ else
KBUILD_PARAMS += NV_KERNEL_SOURCES=$(KERNEL_SOURCES)
KBUILD_PARAMS += NV_KERNEL_OUTPUT=$(KERNEL_OUTPUT)
KBUILD_PARAMS += NV_KERNEL_MODULES="$(NV_KERNEL_MODULES)"
KBUILD_PARAMS += INSTALL_MOD_DIR=kernel/drivers/video
KBUILD_PARAMS += INSTALL_MOD_DIR="$(INSTALL_MOD_DIR)"
KBUILD_PARAMS += NV_SPECTRE_V2=$(SPECTRE_V2_RETPOLINE)
.PHONY: modules module clean clean_conftest modules_install
modules clean modules_install:
@$(MAKE) "LD=$(LD)" "CC=$(CC)" "OBJDUMP=$(OBJDUMP)" $(KBUILD_PARAMS) $@
@$(MAKE) "LD=$(LD)" "CC=$(CC)" "OBJDUMP=$(OBJDUMP)" \
$(PAHOLE_VARIABLES) $(KBUILD_PARAMS) $@
@if [ "$@" = "modules" ]; then \
for module in $(NV_KERNEL_MODULES); do \
if [ -x split-object-file.sh ]; then \
@@ -99,8 +156,9 @@ else
# module symbols on which the Linux kernel's module resolution is dependent
# and hence must be used whenever present.
LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \
$(KERNEL_SOURCES)/arch/$(ARCH)/kernel/module.lds \
LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \
$(KERNEL_SOURCES)/arch/$(KERNEL_ARCH)/kernel/module.lds \
$(KERNEL_OUTPUT)/arch/$(KERNEL_ARCH)/module.lds \
$(KERNEL_OUTPUT)/scripts/module.lds
NV_MODULE_COMMON_SCRIPTS := $(foreach s, $(wildcard $(LD_SCRIPT)), -T $(s))


@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CHARDEV_NUMBERS_H_
#define _NV_CHARDEV_NUMBERS_H_
// NVIDIA's reserved major character device number (Linux).
#define NV_MAJOR_DEVICE_NUMBER 195
// Minor numbers 0 to 247 reserved for regular devices
#define NV_MINOR_DEVICE_NUMBER_REGULAR_MAX 247
// Minor numbers 248 to 253 currently unused
// Minor number 254 reserved for the modeset device (provided by NVKMS)
#define NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE 254
// Minor number 255 reserved for the control device
#define NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE 255
#endif // _NV_CHARDEV_NUMBERS_H_
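A self-contained user-space sketch using the reservations above; the classifying helper is illustrative only, not part of the driver:

```c
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>   /* major(), minor() */

#define NV_MAJOR_DEVICE_NUMBER                 195
#define NV_MINOR_DEVICE_NUMBER_REGULAR_MAX     247
#define NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE  254
#define NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE  255

/* Illustrative helper: name the role of an NVIDIA character device node. */
static const char *nv_chardev_role(dev_t rdev)
{
    unsigned int min;

    if (major(rdev) != NV_MAJOR_DEVICE_NUMBER)
        return "not an NVIDIA device";

    min = minor(rdev);
    if (min <= NV_MINOR_DEVICE_NUMBER_REGULAR_MAX)
        return "regular device";
    if (min == NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE)
        return "modeset device (NVKMS)";
    if (min == NV_MINOR_DEVICE_NUMBER_CONTROL_DEVICE)
        return "control device";
    return "reserved/unused minor";
}

int main(void)
{
    struct stat st;

    if (stat("/dev/nvidiactl", &st) == 0)
        printf("/dev/nvidiactl: %s\n", nv_chardev_role(st.st_rdev));
    return 0;
}
```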


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2022-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,7 +32,10 @@
typedef enum
{
NV_FIRMWARE_TYPE_GSP,
NV_FIRMWARE_TYPE_GSP_LOG
NV_FIRMWARE_TYPE_GSP_LOG,
#if defined(NV_VMWARE)
NV_FIRMWARE_TYPE_BINDATA
#endif
} nv_firmware_type_t;
typedef enum
@@ -42,6 +45,10 @@ typedef enum
NV_FIRMWARE_CHIP_FAMILY_TU11X = 2,
NV_FIRMWARE_CHIP_FAMILY_GA100 = 3,
NV_FIRMWARE_CHIP_FAMILY_GA10X = 4,
NV_FIRMWARE_CHIP_FAMILY_AD10X = 5,
NV_FIRMWARE_CHIP_FAMILY_GH100 = 6,
NV_FIRMWARE_CHIP_FAMILY_GB10X = 8,
NV_FIRMWARE_CHIP_FAMILY_GB10Y = 11,
NV_FIRMWARE_CHIP_FAMILY_END,
} nv_firmware_chip_family_t;
@@ -50,6 +57,10 @@ static inline const char *nv_firmware_chip_family_to_string(
)
{
switch (fw_chip_family) {
case NV_FIRMWARE_CHIP_FAMILY_GB10X: return "gb10x";
case NV_FIRMWARE_CHIP_FAMILY_GB10Y: return "gb10y";
case NV_FIRMWARE_CHIP_FAMILY_GH100: return "gh100";
case NV_FIRMWARE_CHIP_FAMILY_AD10X: return "ad10x";
case NV_FIRMWARE_CHIP_FAMILY_GA10X: return "ga10x";
case NV_FIRMWARE_CHIP_FAMILY_GA100: return "ga100";
case NV_FIRMWARE_CHIP_FAMILY_TU11X: return "tu11x";
@@ -57,18 +68,18 @@ static inline const char *nv_firmware_chip_family_to_string(
case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
case NV_FIRMWARE_CHIP_FAMILY_NULL:
return NULL;
return "";
}
return NULL;
return "";
}
// The includer (presumably nv.c) may optionally define
// NV_FIRMWARE_PATH_FOR_FILENAME(filename)
// to return a string "path" given a gsp_*.bin or gsp_log_*.bin filename.
// The includer may optionally define
// NV_FIRMWARE_FOR_NAME(name)
// to return a platform-defined string for a given gsp_* or gsp_log_* name.
//
// The function nv_firmware_path will then be available.
#if defined(NV_FIRMWARE_PATH_FOR_FILENAME)
static inline const char *nv_firmware_path(
// The function nv_firmware_for_chip_family will then be available.
#if defined(NV_FIRMWARE_FOR_NAME)
static inline const char *nv_firmware_for_chip_family(
nv_firmware_type_t fw_type,
nv_firmware_chip_family_t fw_chip_family
)
@@ -77,13 +88,17 @@ static inline const char *nv_firmware_path(
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB10Y: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_ga10x.bin");
return NV_FIRMWARE_FOR_NAME("gsp_ga10x");
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_tu10x.bin");
return NV_FIRMWARE_FOR_NAME("gsp_tu10x");
case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
case NV_FIRMWARE_CHIP_FAMILY_NULL:
@@ -94,31 +109,40 @@ static inline const char *nv_firmware_path(
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GB10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GB10Y: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_ga10x.bin");
return NV_FIRMWARE_FOR_NAME("gsp_log_ga10x");
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_tu10x.bin");
return NV_FIRMWARE_FOR_NAME("gsp_log_tu10x");
case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
case NV_FIRMWARE_CHIP_FAMILY_NULL:
return "";
}
}
#if defined(NV_VMWARE)
else if (fw_type == NV_FIRMWARE_TYPE_BINDATA)
{
return NV_FIRMWARE_FOR_NAME("bindata_image");
}
#endif
return "";
}
#endif // defined(NV_FIRMWARE_PATH_FOR_FILENAME)
#endif // defined(NV_FIRMWARE_FOR_NAME)
// The includer (presumably nv.c) may optionally define
// NV_FIRMWARE_DECLARE_GSP_FILENAME(filename)
// The includer may optionally define
// NV_FIRMWARE_DECLARE_GSP(name)
// which will then be invoked (at the top-level) for each
// gsp_*.bin (but not gsp_log_*.bin)
#if defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_ga10x.bin")
NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_tu10x.bin")
#endif // defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
// gsp_* (but not gsp_log_*)
#if defined(NV_FIRMWARE_DECLARE_GSP)
NV_FIRMWARE_DECLARE_GSP("gsp_ga10x")
NV_FIRMWARE_DECLARE_GSP("gsp_tu10x")
#endif // defined(NV_FIRMWARE_DECLARE_GSP)
#endif // NV_FIRMWARE_DECLARE_GSP_FILENAME
#endif // NV_FIRMWARE_DECLARE_GSP
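To make the includer contract concrete, a hedged sketch: defining NV_FIRMWARE_FOR_NAME before including this header makes nv_firmware_for_chip_family() available. The path template and the empty NV_FIRMWARE_DECLARE_GSP body are assumptions for illustration, not the driver's actual firmware layout:

```c
/* Sketch of an includer; the "nvidia/580.00/..." path template is an
 * assumption for illustration only. */
#define NV_FIRMWARE_FOR_NAME(name)    "nvidia/580.00/" name ".bin"
#define NV_FIRMWARE_DECLARE_GSP(name) /* e.g. could emit MODULE_FIRMWARE() */
#include "nv-firmware.h"

static const char *example_gsp_firmware(void)
{
    /* Resolves to "nvidia/580.00/gsp_ga10x.bin" for a GA10X-family GPU. */
    return nv_firmware_for_chip_family(NV_FIRMWARE_TYPE_GSP,
                                       NV_FIRMWARE_CHIP_FAMILY_GA10X);
}
```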


@@ -29,17 +29,9 @@
#include <linux/kernel.h>
#include <linux/hash.h>
#if defined(NV_LINUX_STRINGHASH_H_PRESENT)
#include <linux/stringhash.h> /* full_name_hash() */
#else
#include <linux/dcache.h>
#endif
#if (NV_FULL_NAME_HASH_ARGUMENT_COUNT == 3)
#define nv_string_hash(_str) full_name_hash(NULL, _str, strlen(_str))
#else
#define nv_string_hash(_str) full_name_hash(_str, strlen(_str))
#endif
/**
* This naive hashtable was introduced by commit d9b482c8ba19 (v3.7, 2012-10-31).
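The removal above works because every kernel this driver now targets provides the three-argument full_name_hash() (salt pointer first), so callers can use it directly instead of the old nv_string_hash() wrapper. A minimal sketch:

```c
#include <linux/stringhash.h>   /* full_name_hash() */
#include <linux/string.h>       /* strlen() */

/* Sketch: the direct call that replaces the removed compatibility macro. */
static unsigned int example_string_hash(const char *str)
{
    return full_name_hash(NULL /* salt */, str, strlen(str));
}
```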


@@ -37,13 +37,11 @@ typedef enum _HYPERVISOR_TYPE
OS_HYPERVISOR_UNKNOWN
} HYPERVISOR_TYPE;
#define CMD_VGPU_VFIO_WAKE_WAIT_QUEUE 0
#define CMD_VGPU_VFIO_INJECT_INTERRUPT 1
#define CMD_VGPU_VFIO_REGISTER_MDEV 2
#define CMD_VGPU_VFIO_PRESENT 3
#define CMD_VFIO_PCI_CORE_PRESENT 4
#define CMD_VFIO_WAKE_REMOVE_GPU 1
#define CMD_VGPU_VFIO_PRESENT 2
#define CMD_VFIO_PCI_CORE_PRESENT 3
#define MAX_VF_COUNT_PER_GPU 64
typedef enum _VGPU_TYPE_INFO
{
@@ -54,17 +52,11 @@ typedef enum _VGPU_TYPE_INFO
typedef struct
{
void *vgpuVfioRef;
void *waitQueue;
void *nv;
NvU32 *vgpuTypeIds;
NvU8 **vgpuNames;
NvU32 numVgpuTypes;
NvU32 domain;
NvU8 bus;
NvU8 slot;
NvU8 function;
NvBool is_virtfn;
NvU32 domain;
NvU32 bus;
NvU32 device;
NvU32 return_status;
} vgpu_vfio_info;
typedef struct


@@ -25,14 +25,12 @@
#ifndef NV_IOCTL_NUMA_H
#define NV_IOCTL_NUMA_H
#if defined(NV_LINUX)
#include <nv-ioctl-numbers.h>
#if defined(NV_KERNEL_INTERFACE_LAYER)
#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
#include <linux/types.h>
#elif defined (NV_KERNEL_INTERFACE_LAYER) && defined(NV_BSD)
#include <sys/stdint.h>
#else
#include <stdint.h>
@@ -81,5 +79,3 @@ typedef struct nv_ioctl_set_numa_status
#define NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED 6
#endif
#endif


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,5 +39,6 @@
#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13)
#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14)
#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17)
#define NV_ESC_WAIT_OPEN_COMPLETE (NV_IOCTL_BASE + 18)
#endif


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -128,6 +128,9 @@ typedef struct nv_ioctl_register_fd
#define NV_DMABUF_EXPORT_MAX_HANDLES 128
#define NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT 0
#define NV_DMABUF_EXPORT_MAPPING_TYPE_FORCE_PCIE 1
typedef struct nv_ioctl_export_to_dma_buf_fd
{
int fd;
@@ -136,10 +139,18 @@ typedef struct nv_ioctl_export_to_dma_buf_fd
NvU32 numObjects;
NvU32 index;
NvU64 totalSize NV_ALIGN_BYTES(8);
NvU8 mappingType;
NvBool bAllowMmap;
NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES];
NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU32 status;
} nv_ioctl_export_to_dma_buf_fd_t;
typedef struct nv_ioctl_wait_open_complete
{
int rc;
NvU32 adapterStatus;
} nv_ioctl_wait_open_complete_t;
#endif
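A hedged user-space sketch of driving the extended export request, including the new mappingType field; the _IOWR(NV_IOCTL_MAGIC, ...) encoding, the include names, and the use of the control device fd are assumptions based on these headers, not verified against the driver's dispatch:

```c
/* Sketch only: assumes the driver headers are on the include path and
 * that NV_IOCTL_MAGIC + _IOWR is the right request encoding. */
#include <sys/ioctl.h>
#include "nv-ioctl-numbers.h"   /* NV_IOCTL_MAGIC, NV_ESC_EXPORT_TO_DMABUF_FD */
#include "nv-ioctl.h"           /* nv_ioctl_export_to_dma_buf_fd_t */

static int example_export_to_dmabuf(int ctl_fd,
                                    nv_ioctl_export_to_dma_buf_fd_t *params)
{
    /* New in this change: pick a mapping type explicitly; DEFAULT leaves
     * the choice to the driver rather than forcing PCIe mappings. */
    params->mappingType = NV_DMABUF_EXPORT_MAPPING_TYPE_DEFAULT;

    return ioctl(ctl_fd,
                 _IOWR(NV_IOCTL_MAGIC, NV_ESC_EXPORT_TO_DMABUF_FD,
                       nv_ioctl_export_to_dma_buf_fd_t),
                 params);
}
```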


@@ -0,0 +1,64 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_KTHREAD_QUEUE_OS_H__
#define __NV_KTHREAD_QUEUE_OS_H__
#include <linux/types.h> // atomic_t
#include <linux/list.h> // list
#include <linux/sched.h> // task_struct
#include <linux/numa.h> // NUMA_NO_NODE
#include <linux/semaphore.h>
#include "conftest.h"
struct nv_kthread_q
{
struct list_head q_list_head;
spinlock_t q_lock;
// This is a counting semaphore. It gets incremented and decremented
// exactly once for each item that is added to the queue.
struct semaphore q_sem;
atomic_t main_loop_should_exit;
struct task_struct *q_kthread;
bool is_unload_flush_ongoing;
};
struct nv_kthread_q_item
{
struct list_head q_list_node;
nv_q_func_t function_to_run;
void *function_args;
};
#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE (-1)
#endif
#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
#endif


@@ -24,13 +24,14 @@
#ifndef __NV_KTHREAD_QUEUE_H__
#define __NV_KTHREAD_QUEUE_H__
#include <linux/types.h> // atomic_t
#include <linux/list.h> // list
#include <linux/sched.h> // task_struct
#include <linux/numa.h> // NUMA_NO_NODE
#include <linux/semaphore.h>
struct nv_kthread_q;
struct nv_kthread_q_item;
typedef struct nv_kthread_q nv_kthread_q_t;
typedef struct nv_kthread_q_item nv_kthread_q_item_t;
#include "conftest.h"
typedef void (*nv_q_func_t)(void *args);
#include "nv-kthread-q-os.h"
////////////////////////////////////////////////////////////////////////////////
// nv_kthread_q:
@@ -85,38 +86,6 @@
//
////////////////////////////////////////////////////////////////////////////////
typedef struct nv_kthread_q nv_kthread_q_t;
typedef struct nv_kthread_q_item nv_kthread_q_item_t;
typedef void (*nv_q_func_t)(void *args);
struct nv_kthread_q
{
struct list_head q_list_head;
spinlock_t q_lock;
// This is a counting semaphore. It gets incremented and decremented
// exactly once for each item that is added to the queue.
struct semaphore q_sem;
atomic_t main_loop_should_exit;
struct task_struct *q_kthread;
};
struct nv_kthread_q_item
{
struct list_head q_list_node;
nv_q_func_t function_to_run;
void *function_args;
};
#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE (-1)
#endif
#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
//
// The queue must not be used before calling this routine.
//
@@ -155,10 +124,7 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q,
// This routine is the same as nv_kthread_q_init_on_node() with the exception
// that the queue stack will be allocated on the NUMA node of the caller.
//
static inline int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname);
//
// The caller is responsible for stopping all queues, by calling this routine

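A hedged sketch of the queue lifecycle built from the declarations above; nv_kthread_q_item_init(), nv_kthread_q_schedule_q_item(), and nv_kthread_q_stop() are the companion entry points of this interface, with prototypes assumed from context:

```c
#include "nv-kthread-q.h"

/* Callback: runs in the queue's kthread context. */
static void example_work(void *args)
{
    /* ... do deferred work ... */
}

static int example_use_queue(void)
{
    nv_kthread_q_t q;
    nv_kthread_q_item_t item;
    int ret;

    ret = nv_kthread_q_init(&q, "nv_example_q");   /* spawns the kthread */
    if (ret != 0)
        return ret;

    nv_kthread_q_item_init(&item, example_work, NULL);
    nv_kthread_q_schedule_q_item(&q, &item);       /* ups q_sem, wakes kthread */

    nv_kthread_q_stop(&q);  /* drains pending items, then stops the kthread */
    return 0;
}
```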

File diff suppressed because it is too large


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,25 +28,12 @@
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/sched.h> /* signal_pending, cond_resched */
#include <linux/sched.h> /* cond_resched */
#include <linux/semaphore.h>
#include <linux/sched/signal.h> /* signal_pending */
#if defined(NV_LINUX_SCHED_SIGNAL_H_PRESENT)
#include <linux/sched/signal.h> /* signal_pending for kernels >= 4.11 */
#endif
#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
typedef raw_spinlock_t nv_spinlock_t;
#define NV_SPIN_LOCK_INIT(lock) raw_spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock) raw_spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock) raw_spin_unlock_irq(lock)
#define NV_SPIN_LOCK_IRQSAVE(lock,flags) raw_spin_lock_irqsave(lock,flags)
#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) raw_spin_unlock_irqrestore(lock,flags)
#define NV_SPIN_LOCK(lock) raw_spin_lock(lock)
#define NV_SPIN_UNLOCK(lock) raw_spin_unlock(lock)
#define NV_SPIN_UNLOCK_WAIT(lock) raw_spin_unlock_wait(lock)
#else
typedef spinlock_t nv_spinlock_t;
#define NV_DEFINE_SPINLOCK(lock) DEFINE_SPINLOCK(lock)
#define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock) spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock) spin_unlock_irq(lock)
@@ -55,7 +42,6 @@ typedef spinlock_t nv_spinlock_t;
#define NV_SPIN_LOCK(lock) spin_lock(lock)
#define NV_SPIN_UNLOCK(lock) spin_unlock(lock)
#define NV_SPIN_UNLOCK_WAIT(lock) spin_unlock_wait(lock)
#endif
#define NV_INIT_MUTEX(mutex) sema_init(mutex, 1)
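A short sketch of the wrapper family above, which after this change is always backed by plain spinlock_t (the raw_spinlock_t branch for PREEMPT_RT kernels is removed):

```c
/* Sketch: guarding a counter with the nv_spinlock_t wrappers above. */
static nv_spinlock_t example_lock;
static unsigned long example_count;

static void example_setup(void)
{
    NV_SPIN_LOCK_INIT(&example_lock);
}

static void example_increment(void)
{
    unsigned long flags;

    /* The IRQSAVE variant is safe from both process and interrupt context. */
    NV_SPIN_LOCK_IRQSAVE(&example_lock, flags);
    example_count++;
    NV_SPIN_UNLOCK_IRQRESTORE(&example_lock, flags);
}
```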


@@ -29,27 +29,33 @@
typedef int vm_fault_t;
#endif
/* pin_user_pages
/*
* pin_user_pages()
*
* Presence of pin_user_pages() also implies the presence of unpin_user_page().
* Both were added in v5.6-rc1.
* Both were added in v5.6.
*
* pin_user_pages() was added by commit eddb1c228f7951d399240
* ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6-rc1 (2020-01-30)
*
* Removed vmas parameter from pin_user_pages() by commit 40896a02751
* ("mm/gup: remove vmas parameter from pin_user_pages()")
* in linux-next, expected in v6.5-rc1 (2023-05-17)
* pin_user_pages() was added by commit eddb1c228f79
* ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6.
*
* Removed vmas parameter from pin_user_pages() by commit 4c630f307455
* ("mm/gup: remove vmas parameter from pin_user_pages()") in v6.5.
*/
#include <linux/mm.h>
#include <linux/sched.h>
#if defined(NV_PIN_USER_PAGES_PRESENT)
/*
* The conftest for pin_user_pages() breaks on FreeBSD, since pin_user_pages is
* an inline function there. Because it simply maps to get_user_pages, we can
* just replace NV_PIN_USER_PAGES with NV_GET_USER_PAGES on FreeBSD.
*/
#if defined(NV_PIN_USER_PAGES_PRESENT) && !defined(NV_BSD)
#if defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES pin_user_pages
#define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages) \
pin_user_pages(start, nr_pages, gup_flags, pages, NULL)
#else
#define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages, vmas) \
pin_user_pages(start, nr_pages, gup_flags, pages)
#define NV_PIN_USER_PAGES pin_user_pages
#endif // NV_PIN_USER_PAGES_HAS_ARGS_VMAS
#define NV_UNPIN_USER_PAGE unpin_user_page
#else
@@ -57,186 +63,77 @@ typedef int vm_fault_t;
#define NV_UNPIN_USER_PAGE put_page
#endif // NV_PIN_USER_PAGES_PRESENT
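A hedged sketch of the pin/unpin pattern these wrappers support, using the four-argument NV_PIN_USER_PAGES shape defined above; mmap_read_lock() is assumed, as on recent kernels:

```c
#include <linux/mm.h>
#include <linux/sched.h>   /* current */

/* Sketch: pin a user buffer for DMA, then release every pinned page. */
static long example_pin_user_buffer(unsigned long uaddr,
                                    unsigned long nr_pages,
                                    struct page **pages)
{
    long pinned;

    mmap_read_lock(current->mm);
    pinned = NV_PIN_USER_PAGES(uaddr, nr_pages, FOLL_WRITE, pages);
    mmap_read_unlock(current->mm);

    if (pinned > 0) {
        long i;

        /* ... program DMA to/from the pinned pages ... */

        for (i = 0; i < pinned; i++)
            NV_UNPIN_USER_PAGE(pages[i]);
    }
    return pinned;
}
```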
/* get_user_pages
/*
* get_user_pages()
*
* The 8-argument version of get_user_pages was deprecated by commit
* (2016 Feb 12: cde70140fed8429acf7a14e2e2cbd3e329036653) for the non-remote case
* (calling get_user_pages with current and current->mm).
*
* Completely moved to the 6-argument version of get_user_pages by commit
* c12d2da56d0e07d230968ee2305aaa86b93a6832 (2016-04-04).
*
* The write and force parameters were replaced with gup_flags by commit
* 768ae309a96103ed02eb1e111e838c87854d8b51 (2016-10-12).
*
* A 7-argument version of get_user_pages was introduced into linux-4.4.y by
* commit 8e50b8b07f462ab4b91bc1491b1c91bd75e4ad40 which cherry-picked the
* replacement of the write and force parameters with gup_flags
*
* Removed vmas parameter from get_user_pages() by commit 7bbf9c8c99
* ("mm/gup: remove unused vmas parameter from get_user_pages()")
* in linux-next, expected in v6.5-rc1 (2023-05-17)
* Removed vmas parameter from get_user_pages() by commit 54d020692b34
* ("mm/gup: remove unused vmas parameter from get_user_pages()") in v6.5.
*
*/
#if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
get_user_pages(start, nr_pages, flags, pages)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
#if !defined(NV_GET_USER_PAGES_HAS_VMAS_ARG)
#define NV_GET_USER_PAGES get_user_pages
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
get_user_pages(current, current->mm, start, nr_pages, flags, pages, vmas)
#else
static inline long NV_GET_USER_PAGES(unsigned long start,
unsigned long nr_pages,
unsigned int flags,
struct page **pages,
struct vm_area_struct **vmas)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages) \
get_user_pages(start, nr_pages, flags, pages, NULL)
#endif
#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS)
return get_user_pages(start, nr_pages, write, force, pages, vmas);
#else
// NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
return get_user_pages(current, current->mm, start, nr_pages, write,
force, pages, vmas);
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS
}
#endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS
/* pin_user_pages_remote
/*
* pin_user_pages_remote()
*
* pin_user_pages_remote() was added by commit eddb1c228f7951d399240
* ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6 (2020-01-30)
* pin_user_pages_remote() was added by commit eddb1c228f79
* ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6.
*
* pin_user_pages_remote() removed 'tsk' parameter by commit
* 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
* in v5.9-rc1 (2020-08-11).
* 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
* in v5.9.
*
* Removed unused vmas parameter from pin_user_pages_remote() by commit
* 83bcc2e132("mm/gup: remove unused vmas parameter from pin_user_pages_remote()")
* in linux-next, expected in v6.5-rc1 (2023-05-14)
* 0b295316b3a9 ("mm/gup: remove unused vmas parameter from
* pin_user_pages_remote()") in v6.5.
*
*/
#if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, NULL, locked)
#elif defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, locked) \
pin_user_pages_remote(mm, start, nr_pages, flags, pages, NULL, locked)
#else
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
pin_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
#define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
#endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS
#else
#define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE
#endif // NV_PIN_USER_PAGES_REMOTE_PRESENT
/*
* get_user_pages_remote() was added by commit 1e9877902dc7
* ("mm/gup: Introduce get_user_pages_remote()") in v4.6 (2016-02-12).
*
 * Note that get_user_pages_remote() requires the caller to hold a reference on
 * the task_struct (if non-NULL and if this API has tsk argument) and the
 * mm_struct.
* This will always be true when using current and current->mm. If the kernel passes
* the driver a vma via driver callback, the kernel holds a reference on vma->vm_mm
* over that callback.
*
* get_user_pages_remote() write/force parameters were replaced
* with gup_flags by commit 9beae1ea8930 ("mm: replace get_user_pages_remote()
* write/force parameters with gup_flags") in v4.9 (2016-10-13).
*
* get_user_pages_remote() added 'locked' parameter by commit 5b56d49fc31d
* ("mm: add locked parameter to get_user_pages_remote()") in
* v4.10 (2016-12-14).
*
* get_user_pages_remote() removed 'tsk' parameter by
* commit 64019a2e467a ("mm/gup: remove task_struct pointer for
* all gup code") in v5.9-rc1 (2020-08-11).
* all gup code") in v5.9.
*
 * Removed vmas parameter from get_user_pages_remote() by commit ca5e863233e8
 * ("mm/gup: remove vmas parameter from get_user_pages_remote()") in v6.5.
*
*/
#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
    #if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
        #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
            get_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
    #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS)
        #define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
    #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS)
        #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
            get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
    #elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS)
        #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
            get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas)
    #else
        // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS
        static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long nr_pages,
                                                    unsigned int flags,
                                                    struct page **pages,
                                                    struct vm_area_struct **vmas,
                                                    int *locked)
        {
            int write = flags & FOLL_WRITE;
            int force = flags & FOLL_FORCE;

            return get_user_pages_remote(NULL, mm, start, nr_pages, write,
                                         force, pages, vmas);
        }
    #endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED
#else
    #if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS)
        static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long nr_pages,
                                                    unsigned int flags,
                                                    struct page **pages,
                                                    struct vm_area_struct **vmas,
                                                    int *locked)
        {
            int write = flags & FOLL_WRITE;
            int force = flags & FOLL_FORCE;

            return get_user_pages(NULL, mm, start, nr_pages, write, force,
                                  pages, vmas);
        }
    #else
        #define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
            get_user_pages(NULL, mm, start, nr_pages, flags, pages, vmas)
    #endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
#endif // NV_GET_USER_PAGES_REMOTE_PRESENT
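/*
 * Illustrative sketch (not part of the header): NV_GET_USER_PAGES_REMOTE
 * takes references on pages in another process's address space. The
 * 'locked' handling follows the upstream get_user_pages_remote() contract:
 * the callee may drop the mmap lock and clear *locked, in which case the
 * caller must not unlock again. nv_example_gup_remote() is a hypothetical
 * helper.
 */
static long nv_example_gup_remote(struct mm_struct *mm, unsigned long start,
                                  unsigned long nr_pages, struct page **pages)
{
    long ret;
    int locked = 1;

    nv_mmap_read_lock(mm);
    ret = NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, FOLL_WRITE,
                                   pages, NULL, &locked);
    if (locked)
        nv_mmap_read_unlock(mm);
    return ret;
}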
/*
* The .virtual_address field was effectively renamed to .address, by these
* two commits:
*
* struct vm_fault: .address was added by:
* 2016-12-14 82b0f8c39a3869b6fd2a10e180a862248736ec6f
*
* struct vm_fault: .virtual_address was removed by:
* 2016-12-14 1a29d85eb0f19b7d8271923d8917d7b4f5540b3e
*/
static inline unsigned long nv_page_fault_va(struct vm_fault *vmf)
{
#if defined(NV_VM_FAULT_HAS_ADDRESS)
return vmf->address;
#else
return (unsigned long)(vmf->virtual_address);
#endif
}
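/*
 * Illustrative sketch (not part of the header): a .fault handler can use
 * nv_page_fault_va() to obtain the faulting address regardless of whether
 * struct vm_fault exposes .address or the older .virtual_address. The
 * function name is hypothetical, and vm_fault_t assumes a v4.17+ kernel.
 */
static vm_fault_t nv_example_fault(struct vm_fault *vmf)
{
    unsigned long va = nv_page_fault_va(vmf);

    pr_debug("nv_example: fault at user VA 0x%lx\n", va);

    /* A real handler would insert a mapping here instead of failing. */
    return VM_FAULT_SIGBUS;
}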
static inline void nv_mmap_read_lock(struct mm_struct *mm)
{

View File

@@ -26,8 +26,7 @@
#include "nv-linux.h"
#if (defined(CONFIG_X86_LOCAL_APIC) || defined(NVCPU_AARCH64)) && \
(defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR))
#define NV_LINUX_PCIE_MSI_SUPPORTED
#endif
@@ -87,12 +86,6 @@ static inline int nv_pci_enable_msix(nv_linux_state_t *nvl, int nvec)
{
int rc = 0;
// We require all the vectors we are requesting so use the same min and max
rc = pci_enable_msix_range(nvl->pci_dev, nvl->msix_entries, nvec, nvec);
if (rc < 0)
@@ -100,13 +93,6 @@ static inline int nv_pci_enable_msix(nv_linux_state_t *nvl, int nvec)
return NV_ERR_OPERATING_SYSTEM;
}
WARN_ON(nvec != rc);
nvl->num_intr = nvec;
return NV_OK;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,5 +36,6 @@ int nv_pci_count_devices(void);
NvU8 nv_find_pci_capability(struct pci_dev *, NvU8);
int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *);
nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8);
NvBool nv_pci_is_valid_topology_for_direct_pci(nv_state_t *, struct pci_dev *);
NvBool nv_pci_has_common_pci_switch(nv_state_t *nv, struct pci_dev *);
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -60,18 +60,13 @@ static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
#endif /* !defined(NV_VMWARE) */
#if defined(NVCPU_AARCH64)
/*
 * Don't rely on the kernel's definition of pgprot_noncached(), as on 64-bit
 * ARM that's not for system memory, but device memory instead.
 */
#define NV_PGPROT_UNCACHED(old_prot) \
    __pgprot_modify((old_prot), PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
#else
#define NV_PGPROT_UNCACHED(old_prot) pgprot_noncached(old_prot)
#endif
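/*
 * Illustrative sketch (not from the diff): NV_PGPROT_UNCACHED is typically
 * applied to vma->vm_page_prot before remapping memory into user space.
 * nv_example_mmap_uncached() is a hypothetical helper.
 */
static int nv_example_mmap_uncached(struct vm_area_struct *vma,
                                    unsigned long pfn, unsigned long size)
{
    vma->vm_page_prot = NV_PGPROT_UNCACHED(vma->vm_page_prot);
    return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
}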
@@ -93,32 +88,13 @@ static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot)
#define NV_PGPROT_READ_ONLY(old_prot) \
__pgprot(pgprot_val((old_prot)) & ~_PAGE_RW)
#elif defined(NVCPU_RISCV64)
#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
    pgprot_writecombine(old_prot)
/* Don't attempt to mark sysmem pages as write combined on riscv */
#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
#define NV_PGPROT_READ_ONLY(old_prot) \
    __pgprot(pgprot_val((old_prot)) & ~_PAGE_WRITE)
#else
/* Writecombine is not supported */
#undef NV_PGPROT_WRITE_COMBINED_DEVICE

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2019-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -35,6 +35,14 @@ int nv_platform_count_devices(void);
int nv_soc_register_irqs(nv_state_t *nv);
void nv_soc_free_irqs(nv_state_t *nv);
int nv_disable_simplefb_clocks(void);
#define NV_SUPPORTS_PLATFORM_DEVICE NV_IS_EXPORT_SYMBOL_PRESENT___platform_driver_register
#if defined(NV_LINUX_PLATFORM_TEGRA_DCE_DCE_CLIENT_IPC_H_PRESENT)
#define NV_SUPPORTS_DCE_CLIENT_IPC 1
#else
#define NV_SUPPORTS_DCE_CLIENT_IPC 0
#endif
#define NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE (NV_SUPPORTS_PLATFORM_DEVICE && NV_SUPPORTS_DCE_CLIENT_IPC)
#endif
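/*
 * Illustrative sketch (not from the diff): the intent of the feature macros
 * above is that SOC display support requires both a platform bus driver and
 * the DCE client IPC interface, so consumers can guard code like this:
 */
#if NV_SUPPORTS_PLATFORM_DISPLAY_DEVICE
/* register the SOC display platform device hooks */
#else
/* PCI-only configuration: stub out the SOC display entry points */
#endif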

View File

@@ -92,6 +92,24 @@ typedef struct file_operations nv_proc_ops_t;
#endif
#define NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \
static ssize_t nv_procfs_read_lock_##name( \
struct file *file, \
char __user *buf, \
size_t size, \
loff_t *ppos \
) \
{ \
int ret; \
ret = nv_down_read_interruptible(&lock); \
if (ret < 0) \
{ \
return ret; \
} \
size = seq_read(file, buf, size, ppos); \
up_read(&lock); \
return size; \
} \
\
static int nv_procfs_open_##name( \
struct inode *inode, \
struct file *filep \
@@ -104,11 +122,6 @@ typedef struct file_operations nv_proc_ops_t;
{ \
return ret; \
} \
ret = nv_down_read_interruptible(&lock); \
if (ret < 0) \
{ \
single_release(inode, filep); \
} \
return ret; \
} \
\
@@ -117,7 +130,6 @@ typedef struct file_operations nv_proc_ops_t;
struct file *filep \
) \
{ \
up_read(&lock); \
return single_release(inode, filep); \
}
@@ -127,46 +139,7 @@ typedef struct file_operations nv_proc_ops_t;
static const nv_proc_ops_t nv_procfs_##name##_fops = { \
NV_PROC_OPS_SET_OWNER() \
.NV_PROC_OPS_OPEN = nv_procfs_open_##name, \
.NV_PROC_OPS_READ = nv_procfs_read_lock_##name, \
.NV_PROC_OPS_LSEEK = seq_lseek, \
.NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \
};
#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(name, lock, \
write_callback) \
NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \
\
static ssize_t nv_procfs_write_##name( \
struct file *file, \
const char __user *buf, \
size_t size, \
loff_t *ppos \
) \
{ \
ssize_t ret; \
struct seq_file *s; \
\
s = file->private_data; \
if (s == NULL) \
{ \
return -EIO; \
} \
\
ret = write_callback(s, buf + *ppos, size - *ppos); \
if (ret == 0) \
{ \
/* avoid infinite loop */ \
ret = -EIO; \
} \
return ret; \
} \
\
static const nv_proc_ops_t nv_procfs_##name##_fops = { \
NV_PROC_OPS_SET_OWNER() \
.NV_PROC_OPS_OPEN = nv_procfs_open_##name, \
.NV_PROC_OPS_WRITE = nv_procfs_write_##name, \
.NV_PROC_OPS_READ = nv_procfs_read_lock_##name, \
.NV_PROC_OPS_LSEEK = seq_lseek, \
.NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \
};
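/*
 * Illustrative sketch (not from the diff): a read/write procfs node would be
 * instantiated roughly as below. All names are hypothetical, and the seq_file
 * show callback wired up by the open helper is elided.
 */
static struct rw_semaphore nv_example_lock;

static ssize_t nv_procfs_write_example(struct seq_file *s,
                                       const char __user *buf, size_t count)
{
    /* parse 'buf' and update the state printed by the show callback */
    return count;
}

NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(example, nv_example_lock,
                                        nv_procfs_write_example)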

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,11 +25,9 @@
#define _NV_PROTO_H_
#include "nv-pci.h"
#include "nv-register-module.h"
#include "nv-platform.h"
extern const char *nv_device_name;
extern nvidia_module_t nv_fops;
void nv_acpi_register_notifier (nv_linux_state_t *);
void nv_acpi_unregister_notifier (nv_linux_state_t *);
@@ -44,7 +42,7 @@ void nv_procfs_remove_gpu (nv_linux_state_t *);
int nvidia_mmap (struct file *, struct vm_area_struct *);
int nvidia_mmap_helper (nv_state_t *, nv_linux_file_private_t *, nvidia_stack_t *, struct vm_area_struct *, void *);
int nv_encode_caching (struct pgprot_t *, NvU32, nv_memory_type_t);
void nv_revoke_gpu_mappings_locked(nv_state_t *);
NvUPtr nv_vm_map_pages (struct page **, NvU32, NvBool, NvBool);
@@ -59,9 +57,9 @@ int nv_uvm_init (void);
void nv_uvm_exit (void);
NV_STATUS nv_uvm_suspend (void);
NV_STATUS nv_uvm_resume (void);
void nv_uvm_notify_start_device (const NvU8 *uuid);
void nv_uvm_notify_stop_device (const NvU8 *uuid);
NV_STATUS nv_uvm_event_interrupt (const NvU8 *uuid);
NV_STATUS nv_uvm_drain_P2P (const NvU8 *uuid);
NV_STATUS nv_uvm_resume_P2P (const NvU8 *uuid);
/* Move these to nv.h once implemented by other UNIX platforms */
NvBool nvidia_get_gpuid_list (NvU32 *gpu_ids, NvU32 *gpu_count);
@@ -87,8 +85,11 @@ void nv_shutdown_adapter(nvidia_stack_t *, nv_state_t *, nv_linux_state
void nv_dev_free_stacks(nv_linux_state_t *);
NvBool nv_lock_init_locks(nvidia_stack_t *, nv_state_t *);
void nv_lock_destroy_locks(nvidia_stack_t *, nv_state_t *);
int nv_linux_add_device_locked(nv_linux_state_t *);
void nv_linux_remove_device_locked(nv_linux_state_t *);
NvBool nv_acpi_power_resource_method_present(struct pci_dev *);
int nv_linux_init_open_q(nv_linux_state_t *);
void nv_linux_stop_open_q(nv_linux_state_t *);
#endif /* _NV_PROTO_H_ */

View File

@@ -36,13 +36,6 @@
#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000)
#define NV_NSECS_TO_JIFFIES(nsec) ((nsec) * HZ / 1000000000)
#if !defined(NV_TIMESPEC64_PRESENT)
struct timespec64 {
__s64 tv_sec;
long tv_nsec;
};
#endif
#if !defined(NV_KTIME_GET_RAW_TS64_PRESENT)
static inline void ktime_get_raw_ts64(struct timespec64 *ts64)
{
@@ -53,16 +46,6 @@ static inline void ktime_get_raw_ts64(struct timespec64 *ts64)
}
#endif
#if !defined(NV_KTIME_GET_REAL_TS64_PRESENT)
static inline void ktime_get_real_ts64(struct timespec64 *ts64)
{
struct timeval tv;
do_gettimeofday(&tv);
ts64->tv_sec = tv.tv_sec;
ts64->tv_nsec = tv.tv_usec * (NvU64) NSEC_PER_USEC;
}
#endif
static NvBool nv_timer_less_than
(
const struct timespec64 *a,
@@ -73,49 +56,6 @@ static NvBool nv_timer_less_than
: (a->tv_sec < b->tv_sec);
}
#if !defined(NV_TIMESPEC64_PRESENT)
static inline struct timespec64 timespec64_add
(
const struct timespec64 a,
const struct timespec64 b
)
{
struct timespec64 result;
result.tv_sec = a.tv_sec + b.tv_sec;
result.tv_nsec = a.tv_nsec + b.tv_nsec;
while (result.tv_nsec >= NSEC_PER_SEC)
{
++result.tv_sec;
result.tv_nsec -= NSEC_PER_SEC;
}
return result;
}
static inline struct timespec64 timespec64_sub
(
const struct timespec64 a,
const struct timespec64 b
)
{
struct timespec64 result;
result.tv_sec = a.tv_sec - b.tv_sec;
result.tv_nsec = a.tv_nsec - b.tv_nsec;
while (result.tv_nsec < 0)
{
--(result.tv_sec);
result.tv_nsec += NSEC_PER_SEC;
}
return result;
}
static inline s64 timespec64_to_ns(struct timespec64 *ts)
{
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
#endif
static inline NvU64 nv_ktime_get_raw_ns(void)
{
struct timespec64 ts;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -42,24 +42,20 @@ static inline void nv_timer_callback_typed_data(struct timer_list *timer)
nv_timer->nv_timer_callback(nv_timer);
}
static inline void nv_timer_setup(struct nv_timer *nv_timer,
void (*callback)(struct nv_timer *nv_timer))
{
nv_timer->nv_timer_callback = callback;
timer_setup(&nv_timer->kernel_timer, nv_timer_callback_typed_data, 0);
}
static inline void nv_timer_delete_sync(struct timer_list *timer)
{
#if !defined(NV_BSD) && NV_IS_EXPORT_SYMBOL_PRESENT_timer_delete_sync
timer_delete_sync(timer);
#else
init_timer(&nv_timer->kernel_timer);
nv_timer->kernel_timer.function = nv_timer_callback_anon_data;
nv_timer->kernel_timer.data = (unsigned long)nv_timer;
del_timer_sync(timer);
#endif
}
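/*
 * Illustrative sketch (not part of the header): arming an nv_timer with a
 * typed callback and tearing it down. All nv_example_* names are
 * hypothetical.
 */
static struct nv_timer nv_example_timer;

static void nv_example_expire(struct nv_timer *nv_timer)
{
    /* the timer fired; schedule follow-up work here */
}

static void nv_example_start(void)
{
    nv_timer_setup(&nv_example_timer, nv_example_expire);
    mod_timer(&nv_example_timer.kernel_timer, jiffies + HZ);
}

static void nv_example_stop(void)
{
    nv_timer_delete_sync(&nv_example_timer.kernel_timer);
}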

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -42,7 +42,9 @@
#include <nv-caps.h>
#include <nv-firmware.h>
#include <nv-ioctl.h>
#include <nv-ioctl-numa.h>
#include <nvmisc.h>
#include <os/nv_memory_area.h>
extern nv_cap_t *nvidia_caps_root;
@@ -53,9 +55,6 @@ extern const NvBool nv_is_rm_firmware_supported_os;
#include <nv-kernel-interface-api.h>
/* NVIDIA's reserved major character device number (Linux). */
#define NV_MAJOR_DEVICE_NUMBER 195
#define GPU_UUID_LEN (16)
/*
@@ -87,6 +86,20 @@ extern const NvBool nv_is_rm_firmware_supported_os;
#define NV_RM_DEVICE_INTR_ADDRESS 0x100
#define NV_TEGRA_PCI_IGPU_PG_MASK_DEFAULT 0xFFFFFFFF
/*
 * Clock domain identifier, used to fetch the engine load backed by the
 * specified clock domain on Tegra platforms that conform to the Linux
 * devfreq framework, in order to realize dynamic frequency scaling.
 */
typedef enum _TEGRASOC_DEVFREQ_CLK
{
TEGRASOC_DEVFREQ_CLK_GPC,
TEGRASOC_DEVFREQ_CLK_NVD,
} TEGRASOC_DEVFREQ_CLK;
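/*
 * Illustrative sketch (not from the diff): these IDs select which engine's
 * load counter rm_pmu_perfmon_get_load() (declared later in this header)
 * reports, e.g. from a devfreq get_dev_status() callback. Hypothetical use:
 *
 *     NvU32 load;
 *     status = rm_pmu_perfmon_get_load(sp, nv, &load,
 *                                      TEGRASOC_DEVFREQ_CLK_GPC);
 */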
/*!
* @brief The order of the display clocks in the below defined enum
* should be synced with below mapping array and macro.
@@ -109,27 +122,45 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_NVDISPLAY_DISP,
TEGRASOC_WHICH_CLK_NVDISPLAY_P0,
TEGRASOC_WHICH_CLK_NVDISPLAY_P1,
TEGRASOC_WHICH_CLK_NVDISPLAY_P2,
TEGRASOC_WHICH_CLK_NVDISPLAY_P3,
TEGRASOC_WHICH_CLK_NVDISPLAY_P4,
TEGRASOC_WHICH_CLK_NVDISPLAY_P5,
TEGRASOC_WHICH_CLK_NVDISPLAY_P6,
TEGRASOC_WHICH_CLK_NVDISPLAY_P7,
TEGRASOC_WHICH_CLK_DPAUX0,
TEGRASOC_WHICH_CLK_FUSE,
TEGRASOC_WHICH_CLK_DSIPLL_VCO,
TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN,
TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA,
TEGRASOC_WHICH_CLK_SPPLL0_VCO,
TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN,
TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA,
TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB,
TEGRASOC_WHICH_CLK_SPPLL0_DIV10,
TEGRASOC_WHICH_CLK_SPPLL0_DIV25,
TEGRASOC_WHICH_CLK_SPPLL0_DIV27,
TEGRASOC_WHICH_CLK_SPPLL1_VCO,
TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN,
TEGRASOC_WHICH_CLK_SPPLL1_DIV27,
TEGRASOC_WHICH_CLK_VPLL0_REF,
TEGRASOC_WHICH_CLK_VPLL0,
TEGRASOC_WHICH_CLK_VPLL1,
TEGRASOC_WHICH_CLK_VPLL2,
TEGRASOC_WHICH_CLK_VPLL3,
TEGRASOC_WHICH_CLK_VPLL4,
TEGRASOC_WHICH_CLK_VPLL5,
TEGRASOC_WHICH_CLK_VPLL6,
TEGRASOC_WHICH_CLK_VPLL7,
TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF,
TEGRASOC_WHICH_CLK_RG0,
TEGRASOC_WHICH_CLK_RG1,
TEGRASOC_WHICH_CLK_RG2,
TEGRASOC_WHICH_CLK_RG3,
TEGRASOC_WHICH_CLK_RG4,
TEGRASOC_WHICH_CLK_RG5,
TEGRASOC_WHICH_CLK_RG6,
TEGRASOC_WHICH_CLK_RG7,
TEGRASOC_WHICH_CLK_DISPPLL,
TEGRASOC_WHICH_CLK_DISPHUBPLL,
TEGRASOC_WHICH_CLK_DSI_LP,
@@ -137,9 +168,20 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_DSI_PIXEL,
TEGRASOC_WHICH_CLK_PRE_SOR0,
TEGRASOC_WHICH_CLK_PRE_SOR1,
TEGRASOC_WHICH_CLK_PRE_SOR2,
TEGRASOC_WHICH_CLK_PRE_SOR3,
TEGRASOC_WHICH_CLK_DP_LINKA_REF,
TEGRASOC_WHICH_CLK_DP_LINKB_REF,
TEGRASOC_WHICH_CLK_DP_LINKC_REF,
TEGRASOC_WHICH_CLK_DP_LINKD_REF,
TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT,
TEGRASOC_WHICH_CLK_SOR_LINKB_INPUT,
TEGRASOC_WHICH_CLK_SOR_LINKC_INPUT,
TEGRASOC_WHICH_CLK_SOR_LINKD_INPUT,
TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO,
TEGRASOC_WHICH_CLK_SOR_LINKB_AFIFO,
TEGRASOC_WHICH_CLK_SOR_LINKC_AFIFO,
TEGRASOC_WHICH_CLK_SOR_LINKD_AFIFO,
TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M,
TEGRASOC_WHICH_CLK_RG0_M,
TEGRASOC_WHICH_CLK_RG1_M,
@@ -148,17 +190,36 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_PLLHUB,
TEGRASOC_WHICH_CLK_SOR0,
TEGRASOC_WHICH_CLK_SOR1,
TEGRASOC_WHICH_CLK_SOR2,
TEGRASOC_WHICH_CLK_SOR3,
TEGRASOC_WHICH_CLK_SOR_PADA_INPUT,
TEGRASOC_WHICH_CLK_SOR_PADB_INPUT,
TEGRASOC_WHICH_CLK_SOR_PADC_INPUT,
TEGRASOC_WHICH_CLK_SOR_PADD_INPUT,
TEGRASOC_WHICH_CLK_SOR0_PAD,
TEGRASOC_WHICH_CLK_SOR1_PAD,
TEGRASOC_WHICH_CLK_SOR2_PAD,
TEGRASOC_WHICH_CLK_SOR3_PAD,
TEGRASOC_WHICH_CLK_PRE_SF0,
TEGRASOC_WHICH_CLK_SF0,
TEGRASOC_WHICH_CLK_SF1,
TEGRASOC_WHICH_CLK_SF2,
TEGRASOC_WHICH_CLK_SF3,
TEGRASOC_WHICH_CLK_SF4,
TEGRASOC_WHICH_CLK_SF5,
TEGRASOC_WHICH_CLK_SF6,
TEGRASOC_WHICH_CLK_SF7,
TEGRASOC_WHICH_CLK_DSI_PAD_INPUT,
TEGRASOC_WHICH_CLK_PRE_SOR0_REF,
TEGRASOC_WHICH_CLK_PRE_SOR1_REF,
TEGRASOC_WHICH_CLK_SOR0_PLL_REF,
TEGRASOC_WHICH_CLK_SOR1_PLL_REF,
TEGRASOC_WHICH_CLK_SOR2_PLL_REF,
TEGRASOC_WHICH_CLK_SOR3_PLL_REF,
TEGRASOC_WHICH_CLK_SOR0_REF,
TEGRASOC_WHICH_CLK_SOR1_REF,
TEGRASOC_WHICH_CLK_SOR2_REF,
TEGRASOC_WHICH_CLK_SOR3_REF,
TEGRASOC_WHICH_CLK_OSC,
TEGRASOC_WHICH_CLK_DSC,
TEGRASOC_WHICH_CLK_MAUD,
@@ -172,6 +233,27 @@ typedef enum _TEGRASOC_WHICH_CLK
TEGRASOC_WHICH_CLK_PLLA_DISP,
TEGRASOC_WHICH_CLK_PLLA_DISPHUB,
TEGRASOC_WHICH_CLK_PLLA,
TEGRASOC_WHICH_CLK_VPLLX_SOR0_MUXED,
TEGRASOC_WHICH_CLK_VPLLX_SOR1_MUXED,
TEGRASOC_WHICH_CLK_VPLLX_SOR2_MUXED,
TEGRASOC_WHICH_CLK_VPLLX_SOR3_MUXED,
TEGRASOC_WHICH_CLK_SF0_SOR,
TEGRASOC_WHICH_CLK_SF1_SOR,
TEGRASOC_WHICH_CLK_SF2_SOR,
TEGRASOC_WHICH_CLK_SF3_SOR,
TEGRASOC_WHICH_CLK_SF4_SOR,
TEGRASOC_WHICH_CLK_SF5_SOR,
TEGRASOC_WHICH_CLK_SF6_SOR,
TEGRASOC_WHICH_CLK_SF7_SOR,
TEGRASOC_WHICH_CLK_EMC,
TEGRASOC_WHICH_CLK_GPU_FIRST,
TEGRASOC_WHICH_CLK_GPU_SYS = TEGRASOC_WHICH_CLK_GPU_FIRST,
TEGRASOC_WHICH_CLK_GPU_NVD,
TEGRASOC_WHICH_CLK_GPU_UPROC,
TEGRASOC_WHICH_CLK_GPU_GPC0,
TEGRASOC_WHICH_CLK_GPU_GPC1,
TEGRASOC_WHICH_CLK_GPU_GPC2,
TEGRASOC_WHICH_CLK_GPU_LAST = TEGRASOC_WHICH_CLK_GPU_GPC2,
TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only.
} TEGRASOC_WHICH_CLK;
@@ -226,7 +308,6 @@ typedef struct
#define NV_RM_PAGE_MASK (NV_RM_PAGE_SIZE - 1)
#define NV_RM_TO_OS_PAGE_SHIFT (os_page_shift - NV_RM_PAGE_SHIFT)
#define NV_RM_PAGES_PER_OS_PAGE (1U << NV_RM_TO_OS_PAGE_SHIFT)
#define NV_RM_PAGES_TO_OS_PAGES(count) \
((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \
((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0))
@@ -285,11 +366,9 @@ typedef struct nv_usermap_access_params_s
NvU64 offset;
NvU64 *page_array;
NvU64 num_pages;
MemoryArea memArea;
NvU64 access_start;
NvU64 access_size;
NvU64 remap_prot_extra;
NvBool contig;
NvU32 caching;
} nv_usermap_access_params_t;
@@ -302,11 +381,9 @@ typedef struct nv_alloc_mapping_context_s {
NvU64 page_index;
NvU64 *page_array;
NvU64 num_pages;
MemoryArea memArea;
NvU64 access_start;
NvU64 access_size;
NvU64 remap_prot_extra;
NvU32 prot;
NvBool valid;
NvU32 caching;
@@ -319,6 +396,8 @@ typedef enum
NV_SOC_IRQ_GPIO_TYPE,
NV_SOC_IRQ_HDACODEC_TYPE,
NV_SOC_IRQ_TCPC2DISP_TYPE,
NV_SOC_IRQ_HFRP0_TYPE,
NV_SOC_IRQ_HFRP1_TYPE,
NV_SOC_IRQ_INVALID_TYPE
} nv_soc_irq_type_t;
@@ -336,13 +415,12 @@ typedef struct nv_soc_irq_info_s {
NvS32 ref_count;
} nv_soc_irq_info_t;
#define NV_MAX_SOC_IRQS 10
#define NV_MAX_DPAUX_NUM_DEVICES 4
#define NV_MAX_DPAUX_DEV_NAME_SIZE 10
#define NV_MAX_SOC_DPAUX_NUM_DEVICES 4
#define NV_IGPU_LEGACY_STALL_IRQ 70
#define NV_IGPU_MAX_STALL_IRQS 3
#define NV_IGPU_MAX_NONSTALL_IRQS 1
/*
* per device state
*/
@@ -373,6 +451,8 @@ typedef struct nv_state_t
{
NvBool valid;
NvU8 uuid[GPU_UUID_LEN];
NvBool pci_uuid_read_attempted;
NV_STATUS pci_uuid_status;
} nv_uuid_cache;
void *handle;
@@ -384,28 +464,35 @@ typedef struct nv_state_t
nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES];
nv_aperture_t *hdacodec_regs;
nv_aperture_t *mipical_regs;
nv_aperture_t *hfrp0_regs;
nv_aperture_t *hfrp1_regs;
nv_aperture_t *fb, ud;
nv_aperture_t *simregs;
nv_aperture_t *emc_regs;
NvU32 num_dpaux_instance;
NvU32 interrupt_line;
NvU32 dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES];
char dpaux_devname[NV_MAX_DPAUX_NUM_DEVICES][NV_MAX_DPAUX_DEV_NAME_SIZE];
nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS];
NvS32 current_soc_irq;
NvU32 num_soc_irqs;
NvU32 hdacodec_irq;
NvU32 tcpc2disp_irq;
NvU32 hfrp0_irq;
NvU32 hfrp1_irq;
NvU8 *soc_dcb_blob;
NvU32 soc_dcb_size;
NvU32 disp_sw_soc_chip_id;
NvBool soc_is_dpalt_mode_supported;
NvBool soc_is_hfrp_supported;
NvU32 igpu_stall_irq[NV_IGPU_MAX_STALL_IRQS];
NvU32 igpu_nonstall_irq;
NvU32 num_stall_irqs;
NvU64 dma_mask;
NvBool is_tegra_pci_igpu;
NvBool supports_tegra_igpu_rg;
NvBool is_tegra_pci_igpu_rg_enabled;
NvU32 tegra_pci_igpu_pg_mask;
NvBool primary_vga;
NvU32 sim_env;
@@ -472,17 +559,9 @@ typedef struct nv_state_t
NvHandle hDisp;
} rmapi;
/* Bool to check if dma-buf is supported */
NvBool dma_buf_supported;
NvBool printed_openrm_enable_unsupported_gpus_error;
/* Check if NVPCF DSM function is implemented under NVPCF or GPU device scope */
NvBool nvpcf_dsm_in_gpu_scope;
@@ -491,13 +570,35 @@ typedef struct nv_state_t
/* Bool to check if the GPU has a coherent sysmem link */
NvBool coherent;
/*
* Bool to check if GPU memory is backed by struct page.
* False for non-coherent platforms. May also be false
* on coherent platforms if GPU memory is not onlined to the kernel.
*/
NvBool mem_has_struct_page;
/* OS detected GPU has ATS capability */
NvBool ats_support;
/*
* NUMA node ID of the CPU to which the GPU is attached.
* Holds NUMA_NO_NODE on platforms that don't support NUMA configuration.
*/
NvS32 cpu_numa_node_id;
struct {
/* Bool to check if ISO iommu enabled */
NvBool iso_iommu_present;
/* Bool to check if NISO iommu enabled */
NvBool niso_iommu_present;
/* Display SMMU Stream IDs */
NvU32 dispIsoStreamId;
NvU32 dispNisoStreamId;
} iommus;
/* Console is managed by drm drivers or NVKMS */
NvBool client_managed_console;
} nv_state_t;
// These defines need to be in sync with defines in system.h
#define OS_TYPE_LINUX 0x1
#define OS_TYPE_FREEBSD 0x2
#define OS_TYPE_SUNOS 0x3
#define OS_TYPE_VMWARE 0x4
#define NVFP_TYPE_NONE 0x0
#define NVFP_TYPE_REFCOUNTED 0x1
@@ -508,6 +609,7 @@ struct nv_file_private_t
NvHandle *handles;
NvU16 maxHandles;
NvU32 deviceInstance;
NvU32 gpuInstanceId;
NvU8 metadata[64];
nv_file_private_t *ctl_nvfp;
@@ -536,16 +638,18 @@ typedef struct UvmGpuAddressSpaceInfo_tag *nvgpuAddressSpaceInfo_t;
typedef struct UvmGpuAllocInfo_tag *nvgpuAllocInfo_t;
typedef struct UvmGpuP2PCapsParams_tag *nvgpuP2PCapsParams_t;
typedef struct UvmGpuFbInfo_tag *nvgpuFbInfo_t;
typedef struct UvmGpuNvlinkInfo_tag *nvgpuNvlinkInfo_t;
typedef struct UvmGpuEccInfo_tag *nvgpuEccInfo_t;
typedef struct UvmGpuFaultInfo_tag *nvgpuFaultInfo_t;
typedef struct UvmGpuAccessCntrInfo_tag *nvgpuAccessCntrInfo_t;
typedef struct UvmGpuAccessCntrConfig_tag nvgpuAccessCntrConfig_t;
typedef struct UvmGpuInfo_tag nvgpuInfo_t;
typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t;
typedef struct UvmPmaAllocationOptions_tag *nvgpuPmaAllocationOptions_t;
typedef struct UvmPmaStatistics_tag *nvgpuPmaStatistics_t;
typedef struct UvmGpuMemoryInfo_tag *nvgpuMemoryInfo_t;
typedef struct UvmGpuExternalMappingInfo_tag *nvgpuExternalMappingInfo_t;
typedef struct UvmGpuExternalPhysAddrInfo_tag *nvgpuExternalPhysAddrInfo_t;
typedef struct UvmGpuChannelResourceInfo_tag *nvgpuChannelResourceInfo_t;
typedef struct UvmGpuChannelInstanceInfo_tag *nvgpuChannelInstanceInfo_t;
typedef struct UvmGpuChannelResourceBindParams_tag *nvgpuChannelResourceBindParams_t;
@@ -560,23 +664,23 @@ typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64, nvgpuGpuMemor
* flags
*/
#define NV_FLAG_OPEN 0x0001
#define NV_FLAG_EXCLUDE 0x0002
#define NV_FLAG_CONTROL 0x0004
#define NV_FLAG_PCI_P2P_UNSUPPORTED_CHIPSET 0x0008
#define NV_FLAG_SOC_DISPLAY 0x0010
#define NV_FLAG_USES_MSI 0x0020
#define NV_FLAG_USES_MSIX 0x0040
#define NV_FLAG_PASSTHRU 0x0080
#define NV_FLAG_SUSPENDED 0x0100
/* To be set when an FLR needs to be triggered after device shut down. */
#define NV_FLAG_TRIGGER_FLR 0x0400
#define NV_FLAG_PERSISTENT_SW_STATE 0x0800
#define NV_FLAG_IN_RECOVERY 0x1000
#define NV_FLAG_PCI_REMOVE_IN_PROGRESS 0x2000
#define NV_FLAG_UNBIND_LOCK 0x4000
/* To be set when GPU is not present on the bus, to help device teardown */
#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000
typedef enum
{
@@ -606,23 +710,46 @@ typedef enum
NV_POWER_STATE_RUNNING
} nv_power_state_t;
typedef struct
{
const char *vidmem_power_status;
const char *dynamic_power_status;
const char *gc6_support;
const char *gcoff_support;
const char *s0ix_status;
const char *db_support;
} nv_power_info_t;
typedef enum
{
NV_MEMORY_TYPE_SYSTEM, /* Memory mapped for ROM, SBIOS and physical RAM. */
NV_MEMORY_TYPE_REGISTERS,
NV_MEMORY_TYPE_FRAMEBUFFER,
NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g. BARs and MCFG of device */
} nv_memory_type_t;
#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga)
#define NV_IS_CTL_DEVICE(nv) ((nv)->flags & NV_FLAG_CONTROL)
#define NV_IS_SOC_DISPLAY_DEVICE(nv) \
((nv)->flags & NV_FLAG_SOC_DISPLAY)
#define NV_IS_SOC_IGPU_DEVICE(nv) \
((nv)->flags & NV_FLAG_SOC_IGPU)
#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \
(((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0)
/*
* For console setup by EFI GOP, the base address is BAR1.
* For console setup by VBIOS, the base address is BAR2 + 16MB.
*/
#define NV_IS_CONSOLE_MAPPED(nv, addr) \
(((addr) == (nv)->bars[NV_GPU_BAR_INDEX_FB].cpu_address) || \
((addr) == ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000)))
#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \
((nv)->iso_iommu_present)
((nv)->iommus.iso_iommu_present)
#define NV_SOC_IS_NISO_IOMMU_PRESENT(nv) \
((nv)->niso_iommu_present)
((nv)->iommus.niso_iommu_present)
/*
* GPU add/remove events
*/
@@ -741,6 +868,7 @@ static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1))
#endif
/*
* driver internal interfaces
*/
@@ -755,9 +883,9 @@ static inline NvBool IS_IMEM_OFFSET(nv_state_t *nv, NvU64 offset, NvU64 length)
NvU32 NV_API_CALL nv_get_dev_minor (nv_state_t *);
void* NV_API_CALL nv_alloc_kernel_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, void **);
void NV_API_CALL nv_free_kernel_mapping (nv_state_t *, void *, void *, void *);
NV_STATUS NV_API_CALL nv_alloc_user_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **);
void NV_API_CALL nv_free_user_mapping (nv_state_t *, void *, NvU64, void *);
NV_STATUS NV_API_CALL nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32);
NvU64 NV_API_CALL nv_get_kern_phys_address (NvU64);
@@ -767,11 +895,11 @@ nv_state_t* NV_API_CALL nv_get_ctl_state (void);
void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32 );
NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU64, NvU32, NvU32, NvU64, NvU64 *, NvBool, void **);
NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvU64, NvBool, NvU32, NvBool, NvBool, NvS32, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *);
NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **, NvBool);
void NV_API_CALL nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **);
NV_STATUS NV_API_CALL nv_register_peer_io_mem (nv_state_t *, NvU64 *, NvU64, void **);
@@ -779,26 +907,26 @@ void NV_API_CALL nv_unregister_peer_io_mem(nv_state_t *, void *);
struct sg_table;
NV_STATUS NV_API_CALL nv_register_sgt (nv_state_t *, NvU64 *, NvU64, NvU32, void **,
struct sg_table *, void *, NvBool);
void NV_API_CALL nv_unregister_sgt (nv_state_t *, struct sg_table **, void **, void *);
NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64, NvU32, void **);
void NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *);
NV_STATUS NV_API_CALL nv_dma_map_sgt (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **);
NV_STATUS NV_API_CALL nv_dma_map_pages (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **);
NV_STATUS NV_API_CALL nv_dma_unmap_pages (nv_dma_device_t *, NvU64, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **);
NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **);
NV_STATUS NV_API_CALL nv_dma_map_peer (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *);
NV_STATUS NV_API_CALL nv_dma_map_non_pci_peer (nv_dma_device_t *, NvU64, NvU64 *);
void NV_API_CALL nv_dma_unmap_peer (nv_dma_device_t *, NvU64, NvU64);
NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU64 *);
void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64);
void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *);
void NV_API_CALL nv_dma_enable_nvlink (nv_dma_device_t *);
NvBool NV_API_CALL nv_grdma_pci_topology_supported(nv_state_t *, nv_dma_device_t *);
NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *);
NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *);
@@ -813,6 +941,7 @@ void NV_API_CALL nv_acpi_methods_init (NvU32 *);
void NV_API_CALL nv_acpi_methods_uninit (void);
NV_STATUS NV_API_CALL nv_acpi_method (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
NV_STATUS NV_API_CALL nv_acpi_d3cold_dsm_for_upstream_port (nv_state_t *, NvU8 *, NvU32, NvU32, NvU32 *);
NV_STATUS NV_API_CALL nv_acpi_dsm_method (nv_state_t *, NvU8 *, NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *);
NV_STATUS NV_API_CALL nv_acpi_ddc_method (nv_state_t *, void *, NvU32 *, NvBool);
NV_STATUS NV_API_CALL nv_acpi_dod_method (nv_state_t *, NvU32 *, NvU32 *);
@@ -824,9 +953,7 @@ NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32,
NV_STATUS NV_API_CALL nv_log_error (nv_state_t *, NvU32, const char *, va_list);
NvU64 NV_API_CALL nv_get_dma_start_address (nv_state_t *);
NV_STATUS NV_API_CALL nv_set_primary_vga_status(nv_state_t *);
NV_STATUS NV_API_CALL nv_pci_trigger_recovery (nv_state_t *);
NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *);
NvBool NV_API_CALL nv_is_rm_firmware_active(nv_state_t *);
@@ -836,21 +963,11 @@ void NV_API_CALL nv_put_firmware(const void *);
nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **);
void NV_API_CALL nv_put_file_private(void *);
NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_get_egm_info(nv_state_t *, NvU64 *, NvU64 *, NvS32 *);
void NV_API_CALL nv_p2p_free_platform_data(void *data);
NV_STATUS NV_API_CALL nv_revoke_gpu_mappings (nv_state_t *);
void NV_API_CALL nv_acquire_mmap_lock (nv_state_t *);
void NV_API_CALL nv_release_mmap_lock (nv_state_t *);
@@ -873,24 +990,32 @@ void NV_API_CALL nv_cap_drv_exit(void);
NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *);
NvBool NV_API_CALL nv_match_gpu_os_info(nv_state_t *, void *);
NvU32 NV_API_CALL nv_get_os_type(void);
void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end);
void NV_API_CALL nv_get_screen_info(nv_state_t *, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU32 *, NvU64 *);
void NV_API_CALL nv_set_gpu_pg_mask(nv_state_t *);
struct dma_buf;
typedef struct nv_dma_buf nv_dma_buf_t;
struct drm_gem_object;
NV_STATUS NV_API_CALL nv_dma_import_sgt (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *);
void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *);
NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvBool, NvU32 *, struct sg_table **, nv_dma_buf_t **);
NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvBool, NvU32 *, struct sg_table **, nv_dma_buf_t **);
void NV_API_CALL nv_dma_release_dma_buf (nv_dma_buf_t *);
void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *);
NV_STATUS NV_API_CALL nv_schedule_uvm_drain_p2p (NvU8 *);
void NV_API_CALL nv_schedule_uvm_resume_p2p (NvU8 *);
NvBool NV_API_CALL nv_platform_supports_s0ix (void);
NvBool NV_API_CALL nv_s2idle_pm_configured (void);
NvBool NV_API_CALL nv_pci_tegra_register_power_domain (nv_state_t *, NvBool);
NvBool NV_API_CALL nv_pci_tegra_pm_init (nv_state_t *);
void NV_API_CALL nv_pci_tegra_pm_deinit (nv_state_t *);
NvBool NV_API_CALL nv_is_chassis_notebook (void);
void NV_API_CALL nv_allow_runtime_suspend (nv_state_t *nv);
void NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv);
@@ -899,26 +1024,23 @@ typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *)
NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *);
NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *);
void NV_API_CALL nv_get_disp_smmu_stream_ids (nv_state_t *, NvU32 *, NvU32 *);
typedef struct TEGRA_IMP_IMPORT_DATA TEGRA_IMP_IMPORT_DATA;
typedef struct nv_i2c_msg_s nv_i2c_msg_t;
NV_STATUS NV_API_CALL nv_bpmp_send_mrq (nv_state_t *, NvU32, const void *, NvU32, void *, NvU32, NvS32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_i2c_transfer(nv_state_t *, NvU32, NvU8, nv_i2c_msg_t *, int);
void NV_API_CALL nv_i2c_unregister_clients(nv_state_t *);
NV_STATUS NV_API_CALL nv_i2c_bus_status(nv_state_t *, NvU32, NvS32 *, NvS32 *);
NV_STATUS NV_API_CALL nv_imp_get_import_data (TEGRA_IMP_IMPORT_DATA *);
NV_STATUS NV_API_CALL nv_imp_enable_disable_rfl (nv_state_t *nv, NvBool bEnable);
NV_STATUS NV_API_CALL nv_imp_icc_set_bw (nv_state_t *nv, NvU32 avg_bw_kbps, NvU32 floor_bw_kbps);
NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances);
NV_STATUS NV_API_CALL nv_get_tegra_brightness_level(nv_state_t *, NvU32 *);
NV_STATUS NV_API_CALL nv_set_tegra_brightness_level(nv_state_t *, NvU32);
NV_STATUS NV_API_CALL nv_soc_device_reset (nv_state_t *);
NV_STATUS NV_API_CALL nv_soc_pm_powergate (nv_state_t *);
NV_STATUS NV_API_CALL nv_soc_pm_unpowergate (nv_state_t *);
NV_STATUS NV_API_CALL nv_gpio_get_pin_state(nv_state_t *, NvU32, NvU32 *);
@@ -928,10 +1050,6 @@ NV_STATUS NV_API_CALL nv_gpio_get_pin_direction(nv_state_t *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL nv_gpio_get_pin_number(nv_state_t *, NvU32, NvU32 *);
NvBool NV_API_CALL nv_gpio_get_pin_interrupt_status(nv_state_t *, NvU32, NvU32);
NV_STATUS NV_API_CALL nv_gpio_set_pin_interrupt(nv_state_t *, NvU32, NvU32);
NvU32 NV_API_CALL nv_tegra_get_rm_interface_type(NvU32);
NV_STATUS NV_API_CALL nv_tegra_dce_register_ipc_client(NvU32, void *, nvTegraDceClientIpcCallback, NvU32 *);
NV_STATUS NV_API_CALL nv_tegra_dce_client_ipc_send_recv(NvU32, void *, NvU32);
@@ -955,6 +1073,17 @@ NvU32 NV_API_CALL nv_soc_tsec_event_register(nv_soc_tsec_cb_func_t cb_func,
NvU32 NV_API_CALL nv_soc_tsec_event_unregister(NvBool is_init_event);
void* NV_API_CALL nv_soc_tsec_alloc_mem_desc(NvU32 num_bytes, NvU32 *flcn_addr);
void NV_API_CALL nv_soc_tsec_free_mem_desc(void *mem_desc);
NvBool NV_API_CALL nv_is_clk_enabled (nv_state_t *, TEGRASOC_WHICH_CLK);
NV_STATUS NV_API_CALL nv_set_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK);
NV_STATUS NV_API_CALL nv_get_parent (nv_state_t *, TEGRASOC_WHICH_CLK, TEGRASOC_WHICH_CLK*);
NV_STATUS NV_API_CALL nv_clk_get_handles (nv_state_t *);
void NV_API_CALL nv_clk_clear_handles (nv_state_t *);
NV_STATUS NV_API_CALL nv_enable_clk (nv_state_t *, TEGRASOC_WHICH_CLK);
void NV_API_CALL nv_disable_clk (nv_state_t *, TEGRASOC_WHICH_CLK);
NV_STATUS NV_API_CALL nv_get_curr_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *);
NV_STATUS NV_API_CALL nv_get_max_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *);
NV_STATUS NV_API_CALL nv_get_min_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32 *);
NV_STATUS NV_API_CALL nv_set_freq (nv_state_t *, TEGRASOC_WHICH_CLK, NvU32);
/*
* ---------------------------------------------------------------------------
@@ -982,6 +1111,9 @@ NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *
NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *);
void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_msix_allowed (nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_wait_for_bar_firewall (nvidia_stack_t *, NvU32 domain, NvU8 bus, NvU8 device, NvU8 function, NvU16 devId, NvU16 subsystemId);
NV_STATUS NV_API_CALL rm_pmu_perfmon_get_load (nvidia_stack_t *, nv_state_t *, NvU32 *, TEGRASOC_DEVFREQ_CLK);
NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t);
NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *);
@@ -1001,6 +1133,7 @@ void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *
char* NV_API_CALL rm_remove_spaces (const char *);
char* NV_API_CALL rm_string_token (char **, const char);
void NV_API_CALL rm_vgpu_vfio_set_driver_vm(nvidia_stack_t *, NvBool);
NV_STATUS NV_API_CALL rm_get_adapter_status_external(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *);
@@ -1028,20 +1161,28 @@ void NV_API_CALL rm_request_dnotifier_state (nvidia_stack_t *, n
void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *);
NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *);
NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64);
NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *, NvBool *);
NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **);
NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, NvBool, void *, void *, void **, NvBool *);
NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *);
NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *, void *);
NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU64, NvU32, NvU64 *, void **);
NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle,
NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *, void **,
NvBool *, NvU32 *, NvBool *, nv_memory_type_t *);
void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle);
NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *,
NvHandle, NvHandle, MemoryRange,
NvU8, void *, NvBool, MemoryArea *);
void NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *,
NvHandle, NvHandle, NvU8, void *,
NvBool, MemoryArea);
NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *,
nv_state_t *, NvHandle, NvHandle,
NvU8, NvHandle *, NvHandle *,
NvHandle *, void **, NvBool *, NvBool *);
void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *);
NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *);
void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd);
NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id);
@@ -1051,44 +1192,47 @@ NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *);
NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *);
NvBool NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, nv_ioctl_numa_info_t *);
NV_STATUS NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool);
NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *);
NvBool NV_API_CALL rm_disable_iomap_wc(void);
void NV_API_CALL rm_init_tegra_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool);
void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_enable_dynamic_power_management(nvidia_stack_t *, nv_state_t *);
NV_STATUS NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
void NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t);
NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool);
const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *);
const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *);
const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool);
NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool, NvBool *);
void NV_API_CALL rm_get_power_info(nvidia_stack_t *, nv_state_t *, nv_power_info_t *);
void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32);
void NV_API_CALL rm_acpi_nvpcf_notify(nvidia_stack_t *);
NvBool NV_API_CALL rm_is_altstack_in_use(void);
void NV_API_CALL rm_notify_gpu_addition(nvidia_stack_t *, nv_state_t *);
void NV_API_CALL rm_notify_gpu_removal(nvidia_stack_t *, nv_state_t *);
/* vGPU VFIO specific functions */
NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *,
NvU32 *, NvU32 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16);
NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *, NvBool, NvU8, NvBool);
NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8);
NV_STATUS NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32);
NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *);
NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *,
NvU64 *, NvU64 *, NvU32 *, NvBool *, NvU8 *);
NV_STATUS NV_API_CALL nv_vgpu_update_sysfs_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL nv_vgpu_get_hbm_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *);
NV_STATUS NV_API_CALL nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *);
NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_gpu_unbind_event(nvidia_stack_t *, NvU32, NvBool *);
NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*);
NV_STATUS NV_API_CALL nv_check_usermap_access_params(nv_state_t*, const nv_usermap_access_params_t*);
nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*);
void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size);
@@ -1117,6 +1261,9 @@ NV_STATUS NV_API_CALL rm_run_nano_timer_callback(nvidia_stack_t *, nv_state_t
void NV_API_CALL nv_cancel_nano_timer(nv_state_t *, nv_nano_timer_t *);
void NV_API_CALL nv_destroy_nano_timer(nv_state_t *nv, nv_nano_timer_t *);
// Host1x specific functions.
NV_STATUS NV_API_CALL nv_get_syncpoint_aperture(NvU32, NvU64 *, NvU64 *, NvU32 *);
#if defined(NVCPU_X86_64)
static inline NvU64 nv_rdtsc(void)

View File

@@ -0,0 +1,120 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_COMMON_UTILS_H__
#define __NV_COMMON_UTILS_H__
#include "nvtypes.h"
#include "nvmisc.h"
#if !defined(TRUE)
#define TRUE NV_TRUE
#endif
#if !defined(FALSE)
#define FALSE NV_FALSE
#endif
#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0)
/* Get the length of a statically-sized array. */
#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0]))
#define NV_INVALID_HEAD 0xFFFFFFFF
#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0)
#if !defined(NV_MIN)
# define NV_MIN(a,b) (((a)<(b))?(a):(b))
#endif
#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c)
#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d)
#if !defined(NV_MAX)
# define NV_MAX(a,b) (((a)>(b))?(a):(b))
#endif
#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c)
#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d)
static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max)
{
if (val < min) {
return min;
}
if (val > max) {
return max;
}
return val;
}
#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0))
/*
* Macros used for computing palette entries:
*
* NV_UNDER_REPLICATE(val, source_size, result_size) expands a value
* of source_size bits into a value of target_size bits by shifting
* the source value into the high bits and replicating the high bits
* of the value into the low bits of the result.
*
* PALETTE_DEPTH_SHIFT(val, w) maps a colormap entry for a component
* that has w bits to an appropriate entry in a LUT of 256 entries.
*/
static inline unsigned int NV_UNDER_REPLICATE(unsigned short val,
int source_size,
int result_size)
{
return (val << (result_size - source_size)) |
(val >> ((source_size << 1) - result_size));
}
static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth)
{
return NV_UNDER_REPLICATE(val, depth, 8);
}
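/*
 * Worked example (not part of the header): expanding a 5-bit component to
 * 8 bits. NV_UNDER_REPLICATE(0x1F, 5, 8) = (0x1F << 3) | (0x1F >> 2)
 * = 0xF8 | 0x07 = 0xFF, so a full-scale 5-bit value maps to a full-scale
 * 8-bit value, which is exactly the property palette expansion needs.
 */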
/*
* Use __builtin_ffs where it is supported, or provide an equivalent
* implementation for platforms like riscv where it is not.
*/
#if defined(__GNUC__) && !NVCPU_IS_RISCV64
static inline int nv_ffs(int x)
{
return __builtin_ffs(x);
}
#else
static inline int nv_ffs(int x)
{
if (x == 0)
return 0;
LOWESTBITIDX_32(x);
return 1 + x;
}
#endif
#endif /* __NV_COMMON_UTILS_H__ */

View File

@@ -0,0 +1,370 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2010-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* This header file defines the types NVDpyId and NVDpyIdList, as well
* as inline functions to manipulate these types. NVDpyId and
* NVDpyIdList should be treated as opaque by includers of this header
* file.
*/
#ifndef __NV_DPY_ID_H__
#define __NV_DPY_ID_H__
#include "nvtypes.h"
#include "nvmisc.h"
#include "nv_common_utils.h"
#include <nvlimits.h> /* NV_MAX_SUBDEVICES */
typedef struct {
NvU32 opaqueDpyId;
} NVDpyId;
typedef struct {
NvU32 opaqueDpyIdList;
} NVDpyIdList;
#define NV_DPY_ID_MAX_SUBDEVICES NV_MAX_SUBDEVICES
#define NV_DPY_ID_MAX_DPYS_IN_LIST 32
/*
* For use in combination with nvDpyIdToPrintFormat(); e.g.,
*
* printf("dpy id: " NV_DPY_ID_PRINT_FORMAT "\n",
* nvDpyIdToPrintFormat(dpyId));
*
* The includer should not make assumptions about the return type of
* nvDpyIdToPrintFormat().
*/
#define NV_DPY_ID_PRINT_FORMAT "0x%08x"
/* functions to return an invalid DpyId and empty DpyIdList */
static inline NVDpyId nvInvalidDpyId(void)
{
NVDpyId dpyId = { 0 };
return dpyId;
}
static inline NVDpyIdList nvEmptyDpyIdList(void)
{
NVDpyIdList dpyIdList = { 0 };
return dpyIdList;
}
static inline NVDpyIdList nvAllDpyIdList(void)
{
NVDpyIdList dpyIdList = { ~0U };
return dpyIdList;
}
static inline void
nvEmptyDpyIdListSubDeviceArray(NVDpyIdList dpyIdList[NV_DPY_ID_MAX_SUBDEVICES])
{
int dispIndex;
for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) {
dpyIdList[dispIndex] = nvEmptyDpyIdList();
}
}
/* set operations on DpyIds and DpyIdLists: Add, Subtract, Intersect, Xor */
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvAddDpyIdToDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList |
dpyId.opaqueDpyId;
return tmpDpyIdList;
}
/* Passing an invalid display ID makes this function return an empty list. */
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvAddDpyIdToEmptyDpyIdList(NVDpyId dpyId)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyId.opaqueDpyId;
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvAddDpyIdListToDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdListB.opaqueDpyIdList |
dpyIdListA.opaqueDpyIdList;
return tmpDpyIdList;
}
/* Returns: dpyIdList - dpyId */
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvDpyIdListMinusDpyId(NVDpyIdList dpyIdList, NVDpyId dpyId)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList &
(~dpyId.opaqueDpyId);
return tmpDpyIdList;
}
/* Returns: dpyIdListA - dpyIdListB */
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvDpyIdListMinusDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList &
(~dpyIdListB.opaqueDpyIdList);
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvIntersectDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList &
dpyId.opaqueDpyId;
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvIntersectDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList &
dpyIdListB.opaqueDpyIdList;
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvXorDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList ^
dpyId.opaqueDpyId;
return tmpDpyIdList;
}
static inline __attribute__ ((warn_unused_result))
NVDpyIdList nvXorDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList tmpDpyIdList;
tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList ^
dpyIdListB.opaqueDpyIdList;
return tmpDpyIdList;
}
/* boolean checks */
static inline NvBool nvDpyIdIsInDpyIdList(NVDpyId dpyId,
NVDpyIdList dpyIdList)
{
return !!(dpyIdList.opaqueDpyIdList & dpyId.opaqueDpyId);
}
static inline NvBool nvDpyIdIsInvalid(NVDpyId dpyId)
{
return (dpyId.opaqueDpyId == 0);
}
static inline NvBool nvDpyIdListIsEmpty(NVDpyIdList dpyIdList)
{
return (dpyIdList.opaqueDpyIdList == 0);
}
static inline NvBool
nvDpyIdListSubDeviceArrayIsEmpty(NVDpyIdList
dpyIdList[NV_DPY_ID_MAX_SUBDEVICES])
{
int dispIndex;
for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) {
if (!nvDpyIdListIsEmpty(dpyIdList[dispIndex])) {
return NV_FALSE;
}
}
return NV_TRUE;
}
static inline NvBool nvDpyIdsAreEqual(NVDpyId dpyIdA, NVDpyId dpyIdB)
{
return (dpyIdA.opaqueDpyId == dpyIdB.opaqueDpyId);
}
static inline NvBool nvDpyIdListsAreEqual(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
return (dpyIdListA.opaqueDpyIdList == dpyIdListB.opaqueDpyIdList);
}
static inline NvBool nvDpyIdListIsASubSetofDpyIdList(NVDpyIdList dpyIdListA,
NVDpyIdList dpyIdListB)
{
NVDpyIdList intersectedDpyIdList =
nvIntersectDpyIdListAndDpyIdList(dpyIdListA, dpyIdListB);
return nvDpyIdListsAreEqual(intersectedDpyIdList, dpyIdListA);
}
/*
* retrieve the individual dpyIds from dpyIdList; if dpyId is invalid,
* start at the beginning of the list; otherwise, start at the dpyId
* after the specified dpyId
*/
static inline __attribute__ ((warn_unused_result))
NVDpyId nvNextDpyIdInDpyIdListUnsorted(NVDpyId dpyId, NVDpyIdList dpyIdList)
{
if (nvDpyIdIsInvalid(dpyId)) {
dpyId.opaqueDpyId = 1;
} else {
dpyId.opaqueDpyId <<= 1;
}
while (dpyId.opaqueDpyId) {
if (nvDpyIdIsInDpyIdList(dpyId, dpyIdList)) {
return dpyId;
}
dpyId.opaqueDpyId <<= 1;
}
/* no dpyIds left in dpyIdlist; return the invalid dpyId */
return nvInvalidDpyId();
}
#define FOR_ALL_DPY_IDS(_dpyId, _dpyIdList) \
for ((_dpyId) = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), \
(_dpyIdList)); \
!nvDpyIdIsInvalid(_dpyId); \
(_dpyId) = nvNextDpyIdInDpyIdListUnsorted((_dpyId), \
(_dpyIdList)))
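/*
 * Usage sketch (the list contents are hypothetical); iteration proceeds from
 * the lowest set bit to the highest:
 *
 *     NVDpyIdList list = nvAddDpyIdToEmptyDpyIdList(nvNvU32ToDpyId(0x1));
 *     NVDpyId dpyId;
 *
 *     list = nvAddDpyIdToDpyIdList(nvNvU32ToDpyId(0x4), list);
 *
 *     FOR_ALL_DPY_IDS(dpyId, list) {
 *         printf("dpy id: " NV_DPY_ID_PRINT_FORMAT "\n",
 *                nvDpyIdToPrintFormat(dpyId));
 *     }
 */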
/* report how many dpyIds are in the dpyIdList */
static inline int nvCountDpyIdsInDpyIdList(NVDpyIdList dpyIdList)
{
return nvPopCount32(dpyIdList.opaqueDpyIdList);
}
static inline int
nvCountDpyIdsInDpyIdListSubDeviceArray(NVDpyIdList
dpyIdList[NV_DPY_ID_MAX_SUBDEVICES])
{
int dispIndex, n = 0;
for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) {
n += nvCountDpyIdsInDpyIdList(dpyIdList[dispIndex]);
}
return n;
}
/* convert between dpyId/dpyIdList and NV-CONTROL values */
static inline int nvDpyIdToNvControlVal(NVDpyId dpyId)
{
return (int) dpyId.opaqueDpyId;
}
static inline int nvDpyIdListToNvControlVal(NVDpyIdList dpyIdList)
{
return (int) dpyIdList.opaqueDpyIdList;
}
static inline NVDpyId nvNvControlValToDpyId(int val)
{
NVDpyId dpyId;
dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (nv_ffs(val)-1);
return dpyId;
}
static inline NVDpyIdList nvNvControlValToDpyIdList(int val)
{
NVDpyIdList dpyIdList;
dpyIdList.opaqueDpyIdList = val;
return dpyIdList;
}
/* convert between dpyId and NvU32 */
static inline NVDpyId nvNvU32ToDpyId(NvU32 val)
{
NVDpyId dpyId;
dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (nv_ffs(val)-1);
return dpyId;
}
static inline NVDpyIdList nvNvU32ToDpyIdList(NvU32 val)
{
NVDpyIdList dpyIdList;
dpyIdList.opaqueDpyIdList = val;
return dpyIdList;
}
static inline NvU32 nvDpyIdToNvU32(NVDpyId dpyId)
{
return dpyId.opaqueDpyId;
}
static inline NvU32 nvDpyIdListToNvU32(NVDpyIdList dpyIdList)
{
return dpyIdList.opaqueDpyIdList;
}
/* Return the bit position of dpyId: a number in the range [0..31]. */
static inline NvU32 nvDpyIdToIndex(NVDpyId dpyId)
{
return nv_ffs(dpyId.opaqueDpyId) - 1;
}
/* Return a display ID that is not in the list passed in. */
static inline NVDpyId nvNewDpyId(NVDpyIdList excludeList)
{
NVDpyId dpyId;
if (~excludeList.opaqueDpyIdList == 0) {
return nvInvalidDpyId();
}
dpyId.opaqueDpyId =
1U << (nv_ffs(~excludeList.opaqueDpyIdList) - 1);
return dpyId;
}
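/*
 * Example: starting from an empty exclude list, nvNewDpyId() hands out the
 * lowest unused bit, i.e. the two values below equal nvNvU32ToDpyId(0x1) and
 * nvNvU32ToDpyId(0x2), respectively:
 *
 *     NVDpyIdList used = nvEmptyDpyIdList();
 *     NVDpyId a = nvNewDpyId(used);
 *     used = nvAddDpyIdToDpyIdList(a, used);
 *     NVDpyId b = nvNewDpyId(used);
 *
 * Once all 32 dpyIds are excluded, nvNewDpyId() returns nvInvalidDpyId().
 */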
/* See comment for NV_DPY_ID_PRINT_FORMAT. */
static inline NvU32 nvDpyIdToPrintFormat(NVDpyId dpyId)
{
return nvDpyIdToNvU32(dpyId);
}
/* Prevent usage of opaque values. */
#define opaqueDpyId __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H
#define opaqueDpyIdList __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H
#endif /* __NV_DPY_ID_H__ */


@@ -0,0 +1,46 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_MIG_TYPES_H__
#define __NV_MIG_TYPES_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
typedef NvU32 MIGDeviceId;
#define NO_MIG_DEVICE 0L
/* Convert a MIGDeviceId into a 0-based per-GPU subdevice index. */
#define MIG_DEVICE_ID_SUBDEV_MASK 0xf0000000
#define MIG_DEVICE_ID_SUBDEV_SHIFT 28
#define MIG_DEVICE_ID_TO_SUBDEV(migDeviceId) (((migDeviceId) & MIG_DEVICE_ID_SUBDEV_MASK) >> MIG_DEVICE_ID_SUBDEV_SHIFT)
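/* Example with a hypothetical ID: the subdevice index lives in the top
 * nibble, so MIG_DEVICE_ID_TO_SUBDEV(0x30000007) == 0x30000000 >> 28 == 3. */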
#ifdef __cplusplus
}
#endif
#endif /* __NV_MIG_TYPES_H__ */


@@ -86,7 +86,7 @@
/* Not currently implemented for MSVC/ARM64. See bug 3366890. */
# define nv_speculation_barrier()
# define speculation_barrier() nv_speculation_barrier()
#elif defined(NVCPU_NVRISCV64) && NVOS_IS_LIBOS
#elif defined(NVCPU_IS_RISCV64)
# define nv_speculation_barrier()
#else
#error "Unknown compiler/chip family"


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2013-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,7 +29,7 @@
#define _NV_UVM_INTERFACE_H_
// Forward references, to break circular header file dependencies:
struct UvmOpsUvmEvents;
struct UvmEventsLinux;
#if defined(NVIDIA_UVM_ENABLED)
@@ -46,6 +46,7 @@ struct UvmOpsUvmEvents;
#include "nvgputypes.h"
#include "nvstatus.h"
#include "nv_uvm_types.h"
#include "nv_uvm_user_types.h"
// Define the type here as it's Linux specific, used only by the Linux specific
@@ -62,10 +63,10 @@ typedef struct
/*******************************************************************************
nvUvmInterfaceRegisterGpu
Registers the GPU with the provided UUID for use. A GPU must be registered
before its UUID can be used with any other API. This call is ref-counted so
every nvUvmInterfaceRegisterGpu must be paired with a corresponding
nvUvmInterfaceUnregisterGpu.
Registers the GPU with the provided physical UUID for use. A GPU must be
registered before its UUID can be used with any other API. This call is
ref-counted so every nvUvmInterfaceRegisterGpu must be paired with a
corresponding nvUvmInterfaceUnregisterGpu.
You don't need to call nvUvmInterfaceSessionCreate before calling this.
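/*
 * Ref-counting sketch, assuming gpuUuid is a valid NvProcessorUuid; every
 * successful register must eventually be paired with one unregister:
 *
 *     UvmGpuPlatformInfo platformInfo;
 *     NV_STATUS status = nvUvmInterfaceRegisterGpu(&gpuUuid, &platformInfo);
 *     if (status == NV_OK)
 *     {
 *         // ... use the UUID with other nvUvmInterface* APIs ...
 *         nvUvmInterfaceUnregisterGpu(&gpuUuid);
 *     }
 */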
@@ -79,12 +80,13 @@ NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatfo
/*******************************************************************************
nvUvmInterfaceUnregisterGpu
Unregisters the GPU with the provided UUID. This drops the ref count from
nvUvmInterfaceRegisterGpu. Once the reference count goes to 0 the device may
no longer be accessible until the next nvUvmInterfaceRegisterGpu call. No
automatic resource freeing is performed, so only make the last unregister
call after destroying all your allocations associated with that UUID (such
as those from nvUvmInterfaceAddressSpaceCreate).
Unregisters the GPU with the provided physical UUID. This drops the ref
count from nvUvmInterfaceRegisterGpu. Once the reference count goes to 0
the device may no longer be accessible until the next
nvUvmInterfaceRegisterGpu call. No automatic resource freeing is performed,
so only make the last unregister call after destroying all your allocations
associated with that UUID (such as those from
nvUvmInterfaceAddressSpaceCreate).
If the UUID is not found, no operation is performed.
*/
@@ -121,10 +123,10 @@ NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session);
nvUvmInterfaceDeviceCreate
Creates a device object under the given session for the GPU with the given
UUID. Also creates a partition object for the device iff bCreateSmcPartition
is true and pGpuInfo->smcEnabled is true. pGpuInfo->smcUserClientInfo will
be used to determine the SMC partition in this case. A device handle is
returned in the device output parameter.
physical UUID. Also creates a partition object for the device iff
bCreateSmcPartition is true and pGpuInfo->smcEnabled is true.
pGpuInfo->smcUserClientInfo will be used to determine the SMC partition in
this case. A device handle is returned in the device output parameter.
Error codes:
NV_ERR_GENERIC
@@ -161,6 +163,7 @@ void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device);
NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device,
unsigned long long vaBase,
unsigned long long vaSize,
NvBool enableAts,
uvmGpuAddressSpaceHandle *vaSpace,
UvmGpuAddressSpaceInfo *vaSpaceInfo);
@@ -422,33 +425,6 @@ NV_STATUS nvUvmInterfacePmaPinPages(void *pPma,
NvU64 pageSize,
NvU32 flags);
/*******************************************************************************
nvUvmInterfacePmaUnpinPages
This function will unpin the physical memory allocated using PMA. The pages
passed as input must be already pinned, else this function will return an
error and rollback any change if any page is not previously marked "pinned".
Behaviour is undefined if any blacklisted pages are unpinned.
Arguments:
pPma[IN] - Pointer to PMA object.
pPages[IN] - Array of pointers, containing the PA base
address of each page to be unpinned.
pageCount [IN] - Number of pages required to be unpinned.
pageSize [IN] - Page size of each page to be unpinned.
Error codes:
NV_ERR_INVALID_ARGUMENT - Invalid input arguments.
NV_ERR_GENERIC - Unexpected error. We try hard to avoid
returning this error code as is not very
informative.
NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB
*/
NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma,
NvU64 *pPages,
NvLength pageCount,
NvU64 pageSize);
/*******************************************************************************
nvUvmInterfaceMemoryFree
@@ -617,6 +593,14 @@ void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel);
Error codes:
NV_ERR_GENERIC
NV_ERR_NO_MEMORY
NV_ERR_INVALID_STATE
NV_ERR_NOT_SUPPORTED
NV_ERR_NOT_READY
NV_ERR_INVALID_LOCK_STATE
NV_ERR_INVALID_STATE
NV_ERR_NVLINK_FABRIC_NOT_READY
NV_ERR_NVLINK_FABRIC_FAILURE
NV_ERR_GPU_MEMORY_ONLINING_FAILURE
*/
NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device,
UvmGpuCaps *caps);
@@ -638,6 +622,8 @@ NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device,
nvUvmInterfaceGetGpuInfo
Return various gpu info, refer to the UvmGpuInfo struct for details.
The input UUID is for the physical GPU and the pGpuClientInfo identifies
the SMC partition if SMC is enabled and the partition exists.
If no gpu matching the uuid is found, an error will be returned.
On Ampere+ GPUs, pGpuClientInfo contains SMC information provided by the
@@ -645,6 +631,9 @@ NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device,
Error codes:
NV_ERR_GENERIC
NV_ERR_NO_MEMORY
NV_ERR_GPU_UUID_NOT_FOUND
NV_ERR_INSUFFICIENT_PERMISSIONS
NV_ERR_INSUFFICIENT_RESOURCES
*/
NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid,
@@ -672,14 +661,20 @@ NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device);
RM will propagate the update to all channels using the provided VA space.
All channels must be idle when this call is made.
If the pageDirectory is in system memory then a CPU physical address must be
provided. RM will establish and manage the DMA mapping for the
pageDirectory.
Arguments:
vaSpace[IN] - VASpace Object
physAddress[IN] - Physical address of new page directory
physAddress[IN] - Physical address of new page directory. If
!bVidMemAperture this is a CPU physical address.
numEntries[IN] - Number of entries including previous PDE which will be copied
bVidMemAperture[IN] - If set pageDirectory will reside in VidMem aperture else sysmem
pasid[IN] - PASID (Process Address Space IDentifier) of the process
corresponding to the VA space. Ignored unless the VA space
object has ATS enabled.
dmaAddress[OUT] - DMA mapping created for physAddress.
Error codes:
NV_ERR_GENERIC
@@ -687,7 +682,8 @@ NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device);
*/
NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace,
NvU64 physAddress, unsigned numEntries,
NvBool bVidMemAperture, NvU32 pasid);
NvBool bVidMemAperture, NvU32 pasid,
NvU64 *dmaAddress);
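/*
 * Call sketch for a page directory in sysmem (arguments are illustrative):
 * with bVidMemAperture == NV_FALSE, physAddress is a CPU physical address and
 * RM returns the DMA mapping it created in dmaAddress:
 *
 *     NvU64 dmaAddress = 0;
 *     status = nvUvmInterfaceSetPageDirectory(vaSpace, cpuPhysAddr,
 *                                             numEntries, NV_FALSE, pasid,
 *                                             &dmaAddress);
 */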
/*******************************************************************************
nvUvmInterfaceUnsetPageDirectory
@@ -857,7 +853,7 @@ NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device,
UVM GPU UNLOCK
Arguments:
gpuUuid[IN] - UUID of the GPU to operate on
device[IN] - Device handle associated with the gpu
bOwnInterrupts - Set to NV_TRUE for UVM to take ownership of the
replayable page fault interrupts. Set to NV_FALSE
to return ownership of the page fault interrupts
@@ -973,14 +969,45 @@ NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
NOTES:
- This function DOES NOT acquire the RM API or GPU locks. That is because
it is called during fault servicing, which could produce deadlocks.
- This function should not be called when interrupts are disabled.
Arguments:
device[IN] - Device handle associated with the gpu
pFaultInfo[IN] - information provided by RM for fault handling.
used for obtaining the device handle without locks.
bCopyAndFlush[IN] - Instructs RM to perform the flush in the Copy+Flush mode.
In this mode, RM will perform a copy of the packets from
the HW buffer to UVM's SW buffer as part of performing
the flush. This mode gives UVM the opportunity to observe
the packets contained within the HW buffer at the time
of issuing the call.
Error codes:
NV_ERR_INVALID_ARGUMENT
*/
NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(uvmGpuDeviceHandle device);
NV_STATUS nvUvmInterfaceFlushReplayableFaultBuffer(UvmGpuFaultInfo *pFaultInfo,
NvBool bCopyAndFlush);
/*******************************************************************************
nvUvmInterfaceTogglePrefetchFaults
This function sends an RPC to GSP in order to toggle the prefetch fault PRI.
NOTES:
- This function DOES NOT acquire the RM API or GPU locks. That is because
it is called during fault servicing, which could produce deadlocks.
- This function should not be called when interrupts are disabled.
Arguments:
pFaultInfo[IN] - Information provided by RM for fault handling.
Used for obtaining the device handle without locks.
bEnable[IN] - Instructs RM whether to toggle generating faults on
prefetch on/off.
Error codes:
NV_ERR_INVALID_ARGUMENT
*/
NV_STATUS nvUvmInterfaceTogglePrefetchFaults(UvmGpuFaultInfo *pFaultInfo,
NvBool bEnable);
/*******************************************************************************
nvUvmInterfaceInitAccessCntrInfo
@@ -1037,7 +1064,7 @@ NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device,
*/
NV_STATUS nvUvmInterfaceEnableAccessCntr(uvmGpuDeviceHandle device,
UvmGpuAccessCntrInfo *pAccessCntrInfo,
UvmGpuAccessCntrConfig *pAccessCntrConfig);
const UvmGpuAccessCntrConfig *pAccessCntrConfig);
/*******************************************************************************
nvUvmInterfaceDisableAccessCntr
@@ -1059,20 +1086,36 @@ NV_STATUS nvUvmInterfaceDisableAccessCntr(uvmGpuDeviceHandle device,
UvmGpuAccessCntrInfo *pAccessCntrInfo);
//
// Called by the UVM driver to register operations with RM. Only one set of
// Called by the UVM driver to register event callbacks with RM. Only one set of
// callbacks can be registered by any driver at a time. If another set of
// callbacks was already registered, NV_ERR_IN_USE is returned.
//
NV_STATUS nvUvmInterfaceRegisterUvmCallbacks(struct UvmOpsUvmEvents *importedUvmOps);
NV_STATUS nvUvmInterfaceRegisterUvmEvents(struct UvmEventsLinux *importedEvents);
//
// Counterpart to nvUvmInterfaceRegisterUvmCallbacks. This must only be called
// if nvUvmInterfaceRegisterUvmCallbacks returned NV_OK.
// Counterpart to nvUvmInterfaceRegisterUvmEvents. This must only be called if
// nvUvmInterfaceRegisterUvmEvents returned NV_OK.
//
// Upon return, the caller is guaranteed that any outstanding callbacks are done
// and no new ones will be invoked.
//
void nvUvmInterfaceDeRegisterUvmOps(void);
void nvUvmInterfaceDeRegisterUvmEvents(void);
/*******************************************************************************
nvUvmInterfaceGetNvlinkInfo
Gets NVLINK information from RM.
Arguments:
device[IN] - GPU device handle
nvlinkInfo [OUT] - Pointer to NvlinkInfo structure
Error codes:
NV_ERR_GENERIC
NV_ERR_INVALID_ARGUMENT
*/
NV_STATUS nvUvmInterfaceGetNvlinkInfo(uvmGpuDeviceHandle device,
UvmGpuNvlinkInfo *nvlinkInfo);
/*******************************************************************************
nvUvmInterfaceP2pObjectCreate
@@ -1087,7 +1130,8 @@ void nvUvmInterfaceDeRegisterUvmOps(void);
Error codes:
NV_ERR_INVALID_ARGUMENT
NV_ERR_OBJECT_NOT_FOUND : If device object associated with the uuids aren't found.
NV_ERR_OBJECT_NOT_FOUND : If device object associated with the device
handles isn't found.
*/
NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1,
uvmGpuDeviceHandle device2,
@@ -1147,6 +1191,46 @@ NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace,
NvU64 size,
UvmGpuExternalMappingInfo *gpuExternalMappingInfo);
/*******************************************************************************
nvUvmInterfaceGetExternalAllocPhysAddrs
The interface builds the RM physical addrs using the provided input parameters.
Arguments:
vaSpace[IN] - vaSpace handle.
hMemory[IN] - Memory handle.
offset [IN] - Offset from the beginning of the allocation
where PTE mappings should begin.
Should be aligned with mappingPagesize
in gpuExternalMappingInfo associated
with the allocation.
size [IN] - Length of the allocation for which PhysAddrs
should be built.
Should be aligned with mappingPagesize
in gpuExternalMappingInfo associated
with the allocation.
size = 0 will be interpreted as the total size
of the allocation.
gpuExternalMappingInfo[IN/OUT] - See nv_uvm_types.h for more information.
Error codes:
NV_ERR_INVALID_ARGUMENT - Invalid parameter/s is passed.
NV_ERR_INVALID_OBJECT_HANDLE - Invalid memory handle is passed.
NV_ERR_NOT_SUPPORTED - Functionality is not supported (see comments in nv_gpu_ops.c)
NV_ERR_INVALID_BASE - offset is beyond the allocation size
NV_ERR_INVALID_LIMIT - (offset + size) is beyond the allocation size.
NV_ERR_BUFFER_TOO_SMALL - gpuExternalMappingInfo.physAddrBufferSize is insufficient to
store single physAddr.
NV_ERR_NOT_READY - Returned when querying the physAddrs requires a deferred setup
which has not yet completed. It is expected that the caller
will reattempt the call until a different code is returned.
*/
NV_STATUS nvUvmInterfaceGetExternalAllocPhysAddrs(uvmGpuAddressSpaceHandle vaSpace,
NvHandle hMemory,
NvU64 offset,
NvU64 size,
UvmGpuExternalPhysAddrInfo *gpuExternalPhysAddrsInfo);
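/*
 * NV_ERR_NOT_READY retry sketch (arguments are illustrative): the caller is
 * expected to reattempt the call until a different status is returned:
 *
 *     NV_STATUS status;
 *     do
 *     {
 *         status = nvUvmInterfaceGetExternalAllocPhysAddrs(vaSpace, hMemory,
 *                                                          offset, size,
 *                                                          &physAddrInfo);
 *     } while (status == NV_ERR_NOT_READY);
 */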
/*******************************************************************************
nvUvmInterfaceRetainChannel
@@ -1449,18 +1533,17 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
NvU32 methodStreamSize);
/*******************************************************************************
CSL Interface and Locking
nvUvmInterfaceReportFatalError
The following functions do not acquire the RM API or GPU locks and must not be called
concurrently with the same UvmCslContext parameter in different threads. The caller must
guarantee this exclusion.
Reports a global fatal error so RM can inform the clients that a node reboot
is necessary to recover from this error. This function can be called from
any lock environment, bottom half or non-interrupt context.
* nvUvmInterfaceCslRotateIv
* nvUvmInterfaceCslEncrypt
* nvUvmInterfaceCslDecrypt
* nvUvmInterfaceCslSign
* nvUvmInterfaceCslQueryMessagePool
* nvUvmInterfaceCslIncrementIv
*/
void nvUvmInterfaceReportFatalError(NV_STATUS error);
/*******************************************************************************
Cryptography Services Library (CSL) Interface
*/
/*******************************************************************************
@@ -1471,8 +1554,11 @@ NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channe
The lifetime of the context is the same as the lifetime of the secure channel
it is paired with.
Locking: This function acquires an API lock.
Memory : This function dynamically allocates memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
channel[IN] - Handle to a secure channel.
Error codes:
@@ -1490,30 +1576,62 @@ NV_STATUS nvUvmInterfaceCslInitContext(UvmCslContext *uvmCslContext,
If context is already deinitialized then function returns immediately.
Locking: This function does not acquire an API or GPU lock.
Memory : This function may free memory.
Arguments:
uvmCslContext[IN] - The CSL context.
uvmCslContext[IN] - The CSL context associated with a channel.
*/
void nvUvmInterfaceDeinitCslContext(UvmCslContext *uvmCslContext);
/*******************************************************************************
nvUvmInterfaceCslRotateKey
Disables channels and rotates keys.
This function disables channels and rotates associated keys. The channels
associated with the given CSL contexts must be idled before this function is
called. To trigger key rotation all allocated channels for a given key must
be present in the list. If the function returns successfully then the CSL
contexts have been updated with the new key.
Locking: This function attempts to acquire the GPU lock. In case of failure
to acquire the return code is NV_ERR_STATE_IN_USE. The caller must
guarantee that no CSL function, including this one, is invoked
concurrently with the CSL contexts in contextList.
Memory : This function dynamically allocates memory.
Arguments:
contextList[IN/OUT] - An array of pointers to CSL contexts.
contextListCount[IN] - Number of CSL contexts in contextList. Its value
must be greater than 0.
Error codes:
NV_ERR_INVALID_ARGUMENT - contextList is NULL or contextListCount is 0.
NV_ERR_STATE_IN_USE - Unable to acquire lock / resource. Caller
can retry at a later time.
NV_ERR_GENERIC - A failure other than _STATE_IN_USE occurred
when attempting to acquire a lock.
*/
NV_STATUS nvUvmInterfaceCslRotateKey(UvmCslContext *contextList[],
NvU32 contextListCount);
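/*
 * Retry sketch: NV_ERR_STATE_IN_USE only means the lock could not be taken,
 * so the call may be retried; the channels in contextList must already be
 * idle. An immediate-retry loop is shown purely for illustration:
 *
 *     NV_STATUS status;
 *     do
 *     {
 *         status = nvUvmInterfaceCslRotateKey(contextList, contextListCount);
 *     } while (status == NV_ERR_STATE_IN_USE);
 */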
/*******************************************************************************
nvUvmInterfaceCslRotateIv
Rotates the IV for a given channel and operation.
This function will rotate the IV on both the CPU and the GPU.
Outstanding messages that have been encrypted by the GPU should first be
decrypted before calling this function with operation equal to
UVM_CSL_OPERATION_DECRYPT. Similarly, outstanding messages that have been
encrypted by the CPU should first be decrypted before calling this function
with operation equal to UVM_CSL_OPERATION_ENCRYPT. For a given operation
the channel must be idle before calling this function. This function can be
called regardless of the value of the IV's message counter.
For a given operation the channel must be idle before calling this function.
This function can be called regardless of the value of the IV's message counter.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function attempts to acquire the GPU lock. In case of failure to
acquire the return code is NV_ERR_STATE_IN_USE. The caller must guarantee
that no CSL function, including this one, is invoked concurrently with
the same CSL context.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
operation[IN] - Either
- UVM_CSL_OPERATION_ENCRYPT
- UVM_CSL_OPERATION_DECRYPT
@@ -1521,7 +1639,11 @@ Arguments:
Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The rotate operation would cause a counter
to overflow.
NV_ERR_STATE_IN_USE - Unable to acquire lock / resource. Caller
can retry at a later time.
NV_ERR_INVALID_ARGUMENT - Invalid value for operation.
NV_ERR_GENERIC - A failure other than _STATE_IN_USE occurred
when attempting to acquire a lock.
*/
NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
UvmCslOperation operation);
@@ -1538,11 +1660,13 @@ NV_STATUS nvUvmInterfaceCslRotateIv(UvmCslContext *uvmCslContext,
The encryptIV can be obtained from nvUvmInterfaceCslIncrementIv.
However, it is optional. If it is NULL, the next IV in line will be used.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function does not acquire an API or GPU lock.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
bufferSize[IN] - Size of the input and output buffers in
units of bytes. Value can range from 1 byte
to (2^32) - 1 bytes.
@@ -1553,8 +1677,9 @@ Arguments:
Its size is UVM_CSL_CRYPT_AUTH_TAG_SIZE_BYTES.
Error codes:
NV_ERR_INVALID_ARGUMENT - The size of the data is 0 bytes.
- The encryptIv has already been used.
NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a channel.
- The size of the data is 0 bytes.
- The encryptIv has already been used.
*/
NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
NvU32 bufferSize,
@@ -1573,8 +1698,15 @@ NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
maximized when the input and output buffers are 16-byte aligned. This is
natural alignment for AES block.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
During a key rotation event the previous key is stored in the CSL context.
This allows data encrypted by the GPU to be decrypted with the previous key.
The keyRotationId parameter identifies which key is used. The first key rotation
ID has a value of 0, and the ID increments by one with each key rotation event.
Locking: This function does not acquire an API or GPU lock.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
@@ -1583,6 +1715,8 @@ NV_STATUS nvUvmInterfaceCslEncrypt(UvmCslContext *uvmCslContext,
decryptIv[IN] - IV used to decrypt the ciphertext. Its value can either be given by
nvUvmInterfaceCslIncrementIv, or, if NULL, the CSL context's
internal counter is used.
keyRotationId[IN] - Specifies the key that is used for decryption.
A value of NV_U32_MAX specifies the current key.
inputBuffer[IN] - Address of ciphertext input buffer.
outputBuffer[OUT] - Address of plaintext output buffer.
addAuthData[IN] - Address of the plaintext additional authenticated data used to
@@ -1603,6 +1737,7 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
NvU32 bufferSize,
NvU8 const *inputBuffer,
UvmCslIv const *decryptIv,
NvU32 keyRotationId,
NvU8 *outputBuffer,
NvU8 const *addAuthData,
NvU32 addAuthDataSize,
@@ -1616,11 +1751,13 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
Auth and input buffers must not overlap. If they do then calling this function produces
undefined behavior.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function does not acquire an API or GPU lock.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
uvmCslContext[IN/OUT] - The CSL context associated with a channel.
bufferSize[IN] - Size of the input buffer in units of bytes.
Value can range from 1 byte to (2^32) - 1 bytes.
inputBuffer[IN] - Address of plaintext input buffer.
@@ -1629,7 +1766,8 @@ NV_STATUS nvUvmInterfaceCslDecrypt(UvmCslContext *uvmCslContext,
Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The signing operation would cause a counter overflow to occur.
NV_ERR_INVALID_ARGUMENT - The size of the data is 0 bytes.
NV_ERR_INVALID_ARGUMENT - The CSL context is not associated with a channel.
- The size of the data is 0 bytes.
*/
NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
NvU32 bufferSize,
@@ -1641,8 +1779,10 @@ NV_STATUS nvUvmInterfaceCslSign(UvmCslContext *uvmCslContext,
Returns the number of messages that can be encrypted before the message counter will overflow.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function does not acquire an API or GPU lock.
Memory : This function does not dynamically allocate memory.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
@@ -1666,8 +1806,10 @@ NV_STATUS nvUvmInterfaceCslQueryMessagePool(UvmCslContext *uvmCslContext,
can be used in nvUvmInterfaceCslEncrypt. If operation is UVM_CSL_OPERATION_DECRYPT then
the returned IV can be used in nvUvmInterfaceCslDecrypt.
See "CSL Interface and Locking" for locking requirements.
This function does not perform dynamic memory allocation.
Locking: This function does not acquire an API or GPU lock.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Memory : This function does not dynamically allocate memory.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
@@ -1675,7 +1817,7 @@ Arguments:
- UVM_CSL_OPERATION_ENCRYPT
- UVM_CSL_OPERATION_DECRYPT
increment[IN] - The amount by which the IV is incremented. Can be 0.
iv[out] - If non-NULL, a buffer to store the incremented IV.
iv[OUT] - If non-NULL, a buffer to store the incremented IV.
Error codes:
NV_ERR_INVALID_ARGUMENT - The value of the operation parameter is illegal.
@@ -1687,4 +1829,41 @@ NV_STATUS nvUvmInterfaceCslIncrementIv(UvmCslContext *uvmCslContext,
NvU64 increment,
UvmCslIv *iv);
/*******************************************************************************
nvUvmInterfaceCslLogEncryption
Checks and logs information about encryptions associated with the given
CSL context.
For contexts associated with channels, this function does not modify elements of
the UvmCslContext, and must be called for every CPU/GPU encryption.
For the context associated with fault buffers, bufferSize can encompass multiple
encryption invocations, and the UvmCslContext will be updated following a key
rotation event.
In either case the IV remains unmodified after this function is called.
Locking: This function does not acquire an API or GPU lock.
Memory : This function does not dynamically allocate memory.
The caller must guarantee that no CSL function, including this one,
is invoked concurrently with the same CSL context.
Arguments:
uvmCslContext[IN/OUT] - The CSL context.
operation[IN] - If the CSL context is associated with a fault
buffer, this argument is ignored. If it is
associated with a channel, it must be either
- UVM_CSL_OPERATION_ENCRYPT
- UVM_CSL_OPERATION_DECRYPT
bufferSize[IN] - The size of the buffer(s) encrypted by the
external entity in units of bytes.
Error codes:
NV_ERR_INSUFFICIENT_RESOURCES - The encryption would cause a counter
to overflow.
*/
NV_STATUS nvUvmInterfaceCslLogEncryption(UvmCslContext *uvmCslContext,
UvmCslOperation operation,
NvU32 bufferSize);
#endif // _NV_UVM_INTERFACE_H_


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -22,7 +22,8 @@
*/
//
// This file provides common types for both UVM driver and RM's UVM interface.
// This file provides common types for both the UVM kernel driver and RM's UVM
// interface.
//
#ifndef _NV_UVM_TYPES_H_
@@ -32,20 +33,9 @@
#include "nvstatus.h"
#include "nvgputypes.h"
#include "nvCpuUuid.h"
#include "nv_uvm_user_types.h" // For UvmGpuCachingType, UvmGpuMappingType, etc
//
// The default page size is left "0" because BIG page size is the default in
// RM and there are multiple BIG page sizes in RM. These defines are used as
// flags, so "0" is OK when the user is not sure which page size to allocate
//
#define UVM_PAGE_SIZE_DEFAULT 0x0
#define UVM_PAGE_SIZE_4K 0x1000
#define UVM_PAGE_SIZE_64K 0x10000
#define UVM_PAGE_SIZE_128K 0x20000
#define UVM_PAGE_SIZE_2M 0x200000
#define UVM_PAGE_SIZE_512M 0x20000000
//
// When modifying flags, make sure they are compatible with the mirrored
// PMA_* flags in phys_mem_allocator.h.
@@ -80,9 +70,6 @@
//
#define UVM_PMA_CALLED_FROM_PMA_EVICTION 16384
#define UVM_UUID_LEN 16
#define UVM_SW_OBJ_SUBCHANNEL 5
typedef unsigned long long UvmGpuPointer;
//
@@ -104,6 +91,10 @@ typedef struct UvmGpuMemoryInfo_tag
// Out: Set to TRUE, if the allocation is in sysmem.
NvBool sysmem;
// Out: Set to TRUE, if this allocation is treated as EGM.
// sysmem is also TRUE when egm is TRUE.
NvBool egm;
// Out: Set to TRUE, if the allocation is a constructed
// under a Device or Subdevice.
// All permutations of sysmem and deviceDescendant are valid.
@@ -125,6 +116,10 @@ typedef struct UvmGpuMemoryInfo_tag
// Out: Uuid of the GPU to which the allocation belongs.
// This is only valid if deviceDescendant is NV_TRUE.
// When egm is NV_TRUE, this is also the UUID of the GPU
// for which EGM is local.
// If the GPU has SMC enabled, the UUID is the GI UUID.
// Otherwise, it is the UUID for the physical GPU.
// Note: If the allocation is owned by a device in
// an SLI group and the allocation is broadcast
// across the SLI group, this UUID will be any one
@@ -226,9 +221,11 @@ typedef struct UvmGpuChannelInstanceInfo_tag
// Ampere+ GPUs
volatile NvU32 *pChramChannelRegister;
// Out: Address of the Runlist PRI Base Register required to ring the
// doorbell after clearing the faulted bit.
volatile NvU32 *pRunlistPRIBaseRegister;
// Out: Address of the doorbell.
volatile NvU32 *workSubmissionOffset;
// Out: channel handle required to ring the doorbell.
NvU32 workSubmissionToken;
// Out: SMC engine id to which the GR channel is bound, or zero if the GPU
// does not support SMC or it is a CE channel
@@ -260,6 +257,8 @@ typedef struct UvmGpuChannelInfo_tag
// The errorNotifier is filled out when the channel hits an RC error.
NvNotification *errorNotifier;
NvNotification *keyRotationNotifier;
NvU32 hwRunlistId;
NvU32 hwChannelId;
@@ -284,13 +283,14 @@ typedef struct UvmGpuChannelInfo_tag
// GPU VAs of both GPFIFO and GPPUT are needed in Confidential Computing
// so a channel can be controlled via another channel (SEC2 or WLC/LCIC)
NvU64 gpFifoGpuVa;
NvU64 gpPutGpuVa;
NvU64 gpGetGpuVa;
NvU64 gpFifoGpuVa;
NvU64 gpPutGpuVa;
NvU64 gpGetGpuVa;
// GPU VA of work submission offset is needed in Confidential Computing
// so CE channels can ring doorbell of other channels as required for
// WLC/LCIC work submission
NvU64 workSubmissionOffsetGpuVa;
NvU64 workSubmissionOffsetGpuVa;
} UvmGpuChannelInfo;
typedef enum
@@ -321,10 +321,6 @@ typedef struct UvmGpuChannelAllocParams_tag
// The next two fields store UVM_BUFFER_LOCATION values
NvU32 gpFifoLoc;
NvU32 gpPutLoc;
// Allocate the channel as secure. This flag should only be set when
// Confidential Compute is enabled.
NvBool secure;
} UvmGpuChannelAllocParams;
typedef struct UvmGpuPagingChannelAllocParams_tag
@@ -371,6 +367,9 @@ typedef struct
// True if the CE supports encryption
NvBool secure:1;
// True if the CE can be used for fast scrub
NvBool scrub:1;
// Mask of physical CEs assigned to this LCE
//
// The value returned by RM for this field may change when a GPU is
@@ -393,6 +392,8 @@ typedef enum
UVM_LINK_TYPE_NVLINK_2,
UVM_LINK_TYPE_NVLINK_3,
UVM_LINK_TYPE_NVLINK_4,
UVM_LINK_TYPE_NVLINK_5,
UVM_LINK_TYPE_C2C,
} UVM_LINK_TYPE;
typedef struct UvmGpuCaps_tag
@@ -437,80 +438,22 @@ typedef struct UvmGpuAllocInfo_tag
// SEV or GPU CC modes are enabled. Ignored otherwise
} UvmGpuAllocInfo;
typedef enum
{
UVM_VIRT_MODE_NONE = 0, // Baremetal or passthrough virtualization
UVM_VIRT_MODE_LEGACY = 1, // Virtualization without SRIOV support
UVM_VIRT_MODE_SRIOV_HEAVY = 2, // Virtualization with SRIOV Heavy configured
UVM_VIRT_MODE_SRIOV_STANDARD = 3, // Virtualization with SRIOV Standard configured
UVM_VIRT_MODE_COUNT = 4,
} UVM_VIRT_MODE;
// !!! The following enums (with UvmRm prefix) are defined and documented in
// mm/uvm/interface/uvm_types.h and must be mirrored. Please refer to that file
// for more details.
// UVM GPU mapping types
typedef enum
{
UvmRmGpuMappingTypeDefault = 0,
UvmRmGpuMappingTypeReadWriteAtomic = 1,
UvmRmGpuMappingTypeReadWrite = 2,
UvmRmGpuMappingTypeReadOnly = 3,
UvmRmGpuMappingTypeCount = 4
} UvmRmGpuMappingType;
// UVM GPU caching types
typedef enum
{
UvmRmGpuCachingTypeDefault = 0,
UvmRmGpuCachingTypeForceUncached = 1,
UvmRmGpuCachingTypeForceCached = 2,
UvmRmGpuCachingTypeCount = 3
} UvmRmGpuCachingType;
// UVM GPU format types
typedef enum {
UvmRmGpuFormatTypeDefault = 0,
UvmRmGpuFormatTypeBlockLinear = 1,
UvmRmGpuFormatTypeCount = 2
} UvmRmGpuFormatType;
// UVM GPU Element bits types
typedef enum {
UvmRmGpuFormatElementBitsDefault = 0,
UvmRmGpuFormatElementBits8 = 1,
UvmRmGpuFormatElementBits16 = 2,
// Cuda does not support 24-bit width
UvmRmGpuFormatElementBits32 = 4,
UvmRmGpuFormatElementBits64 = 5,
UvmRmGpuFormatElementBits128 = 6,
UvmRmGpuFormatElementBitsCount = 7
} UvmRmGpuFormatElementBits;
// UVM GPU Compression types
typedef enum {
UvmRmGpuCompressionTypeDefault = 0,
UvmRmGpuCompressionTypeEnabledNoPlc = 1,
UvmRmGpuCompressionTypeCount = 2
} UvmRmGpuCompressionType;
typedef struct UvmGpuExternalMappingInfo_tag
{
// In: GPU caching ability.
UvmRmGpuCachingType cachingType;
UvmGpuCachingType cachingType;
// In: Virtual permissions.
UvmRmGpuMappingType mappingType;
UvmGpuMappingType mappingType;
// In: RM virtual mapping memory format
UvmRmGpuFormatType formatType;
UvmGpuFormatType formatType;
// In: RM virtual mapping element bits
UvmRmGpuFormatElementBits elementBits;
UvmGpuFormatElementBits elementBits;
// In: RM virtual compression type
UvmRmGpuCompressionType compressionType;
UvmGpuCompressionType compressionType;
// In: Size of the buffer to store PTEs (in bytes).
NvU64 pteBufferSize;
@@ -536,14 +479,51 @@ typedef struct UvmGpuExternalMappingInfo_tag
// Out: PTE size (in bytes)
NvU32 pteSize;
// Out: UVM needs to invalidate L2 at unmap
NvBool bNeedL2InvalidateAtUnmap;
} UvmGpuExternalMappingInfo;
typedef struct UvmGpuExternalPhysAddrInfo_tag
{
// In: Virtual permissions. Returns
// NV_ERR_INVALID_ACCESS_TYPE if input is
// inaccurate
UvmGpuMappingType mappingType;
// In: Size of the buffer to store PhysAddrs (in bytes).
NvU64 physAddrBufferSize;
// In: Page size for mapping
// If this field is passed as 0, the page size
// of the allocation is used for mapping.
// nvUvmInterfaceGetExternalAllocPtes must pass
// this field as zero.
NvU64 mappingPageSize;
// In: Pointer to a buffer to store PhysAddrs.
// Out: The interface will fill the buffer with PhysAddrs
NvU64 *physAddrBuffer;
// Out: Number of PhysAddrs filled in to the buffer.
NvU64 numWrittenPhysAddrs;
// Out: Number of PhysAddrs remaining to be filled
// if the buffer is not sufficient to accommodate
// requested PhysAddrs.
NvU64 numRemainingPhysAddrs;
} UvmGpuExternalPhysAddrInfo;
typedef struct UvmGpuP2PCapsParams_tag
{
// Out: peerId[i] contains gpu[i]'s peer id of gpu[1 - i]. Only defined if
// the GPUs are direct peers.
NvU32 peerIds[2];
// Out: peerId[i] contains gpu[i]'s EGM peer id of gpu[1 - i]. Only defined
// if the GPUs are direct peers and EGM enabled in the system.
NvU32 egmPeerIds[2];
// Out: UVM_LINK_TYPE
NvU32 p2pLink;
@@ -560,10 +540,16 @@ typedef struct UvmGpuP2PCapsParams_tag
// bandwidth for indirect peers is zero.
NvU32 totalLinkLineRateMBps;
// Out: True if the peers have an indirect link to communicate. On P9
// systems, this is true if peers are connected to different NPUs that
// forward the requests between them.
NvU32 indirectAccess : 1;
// Out: IOMMU/DMA mappings of bar1 of the respective peer vidmem.
// Size is 0 if bar1 p2p is not supported.
NvU64 bar1DmaAddress[2];
NvU64 bar1DmaSize[2];
// True if GPU i can use PCIe atomics on locations in GPU[i-1]
// BAR1. This implies that GPU[i] can issue PCIe atomics,
// GPU[i-1] can accept PCIe atomics, and the bus interconnect
// between the two GPUs can correctly route PCIe atomics.
NvBool bar1PcieAtomics[2];
} UvmGpuP2PCapsParams;
// Platform-wide information
@@ -572,8 +558,11 @@ typedef struct UvmPlatformInfo_tag
// Out: ATS (Address Translation Services) is supported
NvBool atsSupported;
// Out: AMD SEV (Secure Encrypted Virtualization) is enabled
NvBool sevEnabled;
// Out: True if HW trusted execution, such as AMD's SEV-SNP or Intel's TDX,
// is enabled in the VM, indicating that Confidential Computing must be
// also enabled in the GPU(s); these two security features are either both
// enabled, or both disabled.
NvBool confComputingEnabled;
} UvmPlatformInfo;
typedef struct UvmGpuClientInfo_tag
@@ -583,18 +572,13 @@ typedef struct UvmGpuClientInfo_tag
NvHandle hSmcPartRef;
} UvmGpuClientInfo;
typedef enum
{
UVM_GPU_CONF_COMPUTE_MODE_NONE,
UVM_GPU_CONF_COMPUTE_MODE_APM,
UVM_GPU_CONF_COMPUTE_MODE_HCC,
UVM_GPU_CONF_COMPUTE_MODE_COUNT
} UvmGpuConfComputeMode;
typedef struct UvmGpuConfComputeCaps_tag
{
// Out: GPU's confidential compute mode
UvmGpuConfComputeMode mode;
// Out: true if Confidential Computing is enabled on the GPU
NvBool bConfComputingEnabled;
// Out: true if key rotation is enabled (for UVM keys) on the GPU
NvBool bKeyRotationEnabled;
} UvmGpuConfComputeCaps;
#define UVM_GPU_NAME_LENGTH 0x40
@@ -604,7 +588,8 @@ typedef struct UvmGpuInfo_tag
// Printable gpu name
char name[UVM_GPU_NAME_LENGTH];
// Uuid of this gpu
// Uuid of the physical GPU or GI UUID if nvUvmInterfaceGetGpuInfo()
// requested information for a valid SMC partition.
NvProcessorUuid uuid;
// Gpu architecture; NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_*
@@ -650,6 +635,9 @@ typedef struct UvmGpuInfo_tag
// Maximum number of TPCs per GPC
NvU32 maxTpcPerGpcCount;
// Number of access counter buffers.
NvU32 accessCntrBufferCount;
// NV_TRUE if SMC is enabled on this GPU.
NvBool smcEnabled;
@@ -686,6 +674,28 @@ typedef struct UvmGpuInfo_tag
// to NVSwitch peers.
NvBool connectedToSwitch;
NvU64 nvswitchMemoryWindowStart;
// local EGM properties
// NV_TRUE if EGM is enabled
NvBool egmEnabled;
// Peer ID to reach local EGM when EGM is enabled
NvU8 egmPeerId;
// EGM base address to offset in the GMMU PTE entry for EGM mappings
NvU64 egmBaseAddr;
// If connectedToSwitch is NV_TRUE,
// nvswitchEgmMemoryWindowStart tells the base address for the GPU's EGM memory in the
// NVSwitch address space. It is used when creating PTEs of GPU memory mappings
// to NVSwitch peers.
NvU64 nvswitchEgmMemoryWindowStart;
// GPU supports ATS capability
NvBool atsSupport;
// GPU supports Non-PASID ATS capability
NvBool nonPasidAtsSupport;
} UvmGpuInfo;
typedef struct UvmGpuFbInfo_tag
@@ -694,9 +704,16 @@ typedef struct UvmGpuFbInfo_tag
// RM regions that are not registered with PMA either.
NvU64 maxAllocatableAddress;
NvU32 heapSize; // RAM in KB available for user allocations
NvU32 reservedHeapSize; // RAM in KB reserved for internal RM allocation
NvBool bZeroFb; // Zero FB mode enabled.
NvU32 heapSize; // RAM in KB available for user allocations
NvU32 reservedHeapSize; // RAM in KB reserved for internal RM allocation
NvBool bZeroFb; // Zero FB mode enabled.
NvU64 maxVidmemPageSize; // Largest GPU page size to access vidmem.
NvBool bStaticBar1Enabled; // Static BAR1 mode is enabled
NvBool bStaticBar1WriteCombined; // Write combined is enabled
NvU64 staticBar1StartOffset; // The start offset of the static mapping
NvU64 staticBar1Size; // The size of the static mapping
NvU32 heapStart; // The start offset of heap in KB, helpful for MIG
// systems
} UvmGpuFbInfo;
typedef struct UvmGpuEccInfo_tag
@@ -708,6 +725,15 @@ typedef struct UvmGpuEccInfo_tag
NvBool bEccEnabled;
} UvmGpuEccInfo;
typedef struct UvmGpuNvlinkInfo_tag
{
unsigned nvlinkMask;
unsigned nvlinkOffset;
void *nvlinkReadLocation;
NvBool *nvlinkErrorNotifier;
NvBool bNvlinkRecoveryEnabled;
} UvmGpuNvlinkInfo;
typedef struct UvmPmaAllocationOptions_tag
{
NvU32 flags;
@@ -720,19 +746,6 @@ typedef struct UvmPmaAllocationOptions_tag
NvU32 resultFlags; // valid if the allocation function returns NV_OK
} UvmPmaAllocationOptions;
//
// Mirrored in PMA (PMA_STATS)
//
typedef struct UvmPmaStatistics_tag
{
volatile NvU64 numPages2m; // PMA-wide 2MB pages count across all regions
volatile NvU64 numFreePages64k; // PMA-wide free 64KB page count across all regions
volatile NvU64 numFreePages2m; // PMA-wide free 2MB pages count across all regions
volatile NvU64 numPages2mProtected; // PMA-wide 2MB pages count in protected memory
volatile NvU64 numFreePages64kProtected; // PMA-wide free 64KB page count in protected memory
volatile NvU64 numFreePages2mProtected; // PMA-wide free 2MB pages count in protected memory
} UvmPmaStatistics;
/*******************************************************************************
uvmEventSuspend
This function will be called by the GPU driver to signal to UVM that the
@@ -774,14 +787,14 @@ typedef NV_STATUS (*uvmEventResume_t) (void);
/*******************************************************************************
uvmEventStartDevice
This function will be called by the GPU driver once it has finished its
initialization to tell the UVM driver that this GPU has come up.
initialization to tell the UVM driver that this physical GPU has come up.
*/
typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
/*******************************************************************************
uvmEventStopDevice
This function will be called by the GPU driver to let UVM know that a GPU
is going down.
This function will be called by the GPU driver to let UVM know that a
physical GPU is going down.
*/
typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct);
@@ -812,25 +825,62 @@ typedef NV_STATUS (*uvmEventServiceInterrupt_t) (void *pDeviceObject,
/*******************************************************************************
uvmEventIsrTopHalf_t
This function will be called by the GPU driver to let UVM know
that an interrupt has occurred.
that an interrupt has occurred on the given physical GPU.
Returns:
NV_OK if the UVM driver handled the interrupt
NV_ERR_NO_INTR_PENDING if the interrupt is not for the UVM driver
*/
#if defined (__linux__)
typedef NV_STATUS (*uvmEventIsrTopHalf_t) (const NvProcessorUuid *pGpuUuidStruct);
#else
typedef void (*uvmEventIsrTopHalf_t) (void);
#endif
struct UvmOpsUvmEvents
/*******************************************************************************
uvmEventDrainP2P
This function will be called by the GPU driver to signal to UVM that the
GPU has encountered an uncontained error, and all peer work must be drained
to recover. When it is called, the following assumptions/guarantees are
valid/made:
* Impacted user channels have been preempted and disabled
* UVM channels are still running normally and will continue to do
so unless an unrecoverable error is hit on said channels
* UVM must not return from this function until all enqueued work on
  peer channels has drained
* In the context of this function call, RM will still service faults
* UVM must prevent new peer work from being enqueued until the
  uvmEventResumeP2P callback is issued
Returns:
NV_OK if UVM has idled peer work and will prevent new peer workloads.
NV_ERR_TIMEOUT if peer work could not be drained within the timeout
XXX NV_ERR_* for any other failure (TBD)
*/
typedef NV_STATUS (*uvmEventDrainP2P_t) (const NvProcessorUuid *pGpuUuidStruct);
/*******************************************************************************
uvmEventResumeP2P
This function will be called by the GPU driver to signal to UVM that the
GPU has recovered from the previously reported uncontained NVLINK error.
When it is called, the following assumptions/guarantees are valid/made:
* UVM is again allowed to enqueue peer work
* UVM channels are still running normally
*/
typedef NV_STATUS (*uvmEventResumeP2P_t) (const NvProcessorUuid *pGpuUuidStruct);
struct UvmEventsLinux
{
uvmEventIsrTopHalf_t isrTopHalf;
uvmEventSuspend_t suspend;
uvmEventResume_t resume;
uvmEventDrainP2P_t drainP2P;
uvmEventResumeP2P_t resumeP2P;
};
struct UvmEventsWindows
{
uvmEventSuspend_t suspend;
uvmEventResume_t resume;
uvmEventStartDevice_t startDevice;
uvmEventStopDevice_t stopDevice;
uvmEventIsrTopHalf_t isrTopHalf;
uvmEventStopDevice_t stopDevice;
#if defined (_WIN32)
uvmEventWddmResetDuringTimeout_t wddmResetDuringTimeout;
uvmEventWddmRestartAfterTimeout_t wddmRestartAfterTimeout;
@@ -851,6 +901,14 @@ typedef union UvmFaultMetadataPacket_tag
NvU8 _padding[32];
} UvmFaultMetadataPacket;
// This struct shall not be accessed nor modified directly by UVM as it is
// entirely managed by the RM layer
typedef struct UvmCslContext_tag
{
struct ccslContext_t *ctx;
void *nvidia_stack;
} UvmCslContext;
typedef struct UvmGpuFaultInfo_tag
{
struct
@@ -892,26 +950,25 @@ typedef struct UvmGpuFaultInfo_tag
NvU32 replayableFaultMask;
// Fault buffer CPU mapping
void* bufferAddress;
//
// When Confidential Computing is disabled, the mapping points to the
// actual HW fault buffer.
//
// When Confidential Computing is enabled, the mapping points to a
// copy of the HW fault buffer. This "shadow buffer" is maintained
// by GSP-RM.
void* bufferAddress;
// Size, in bytes, of the fault buffer pointed by bufferAddress.
NvU32 bufferSize;
// Mapping pointing to the start of the fault buffer metadata containing
// a 16Byte authentication tag and a valid byte. Always NULL when
// Confidential Computing is disabled.
UvmFaultMetadataPacket *bufferMetadata;
// Indicates whether UVM owns the replayable fault buffer.
// The value of this field is always NV_TRUE When Confidential Computing
// is disabled.
NvBool bUvmOwnsHwFaultBuffer;
// CSL context used for performing decryption of replayable faults when
// Confidential Computing is enabled.
UvmCslContext cslCtx;
} replayable;
struct
{
@@ -981,32 +1038,9 @@ typedef struct UvmGpuAccessCntrInfo_tag
NvHandle accessCntrBufferHandle;
} UvmGpuAccessCntrInfo;
typedef enum
{
UVM_ACCESS_COUNTER_GRANULARITY_64K = 1,
UVM_ACCESS_COUNTER_GRANULARITY_2M = 2,
UVM_ACCESS_COUNTER_GRANULARITY_16M = 3,
UVM_ACCESS_COUNTER_GRANULARITY_16G = 4,
} UVM_ACCESS_COUNTER_GRANULARITY;
typedef enum
{
UVM_ACCESS_COUNTER_USE_LIMIT_NONE = 1,
UVM_ACCESS_COUNTER_USE_LIMIT_QTR = 2,
UVM_ACCESS_COUNTER_USE_LIMIT_HALF = 3,
UVM_ACCESS_COUNTER_USE_LIMIT_FULL = 4,
} UVM_ACCESS_COUNTER_USE_LIMIT;
typedef struct UvmGpuAccessCntrConfig_tag
{
NvU32 granularity;
NvU32 threshold;
} UvmGpuAccessCntrConfig;
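/*
 * Illustration (not part of this header): a client might request 2M
 * granularity with a mid-range notification threshold. The threshold value
 * below is arbitrary, chosen only for the example.
 */
static const UvmGpuAccessCntrConfig uvmExampleAccessCntrConfig = {
    .granularity = UVM_ACCESS_COUNTER_GRANULARITY_2M,
    .threshold   = 256,
};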
@@ -1036,24 +1070,18 @@ typedef UvmGpuAccessCntrConfig gpuAccessCntrConfig;
typedef UvmGpuFaultInfo gpuFaultInfo;
typedef UvmGpuMemoryInfo gpuMemoryInfo;
typedef UvmGpuExternalMappingInfo gpuExternalMappingInfo;
typedef UvmGpuExternalPhysAddrInfo gpuExternalPhysAddrInfo;
typedef UvmGpuChannelResourceInfo gpuChannelResourceInfo;
typedef UvmGpuChannelInstanceInfo gpuChannelInstanceInfo;
typedef UvmGpuChannelResourceBindParams gpuChannelResourceBindParams;
typedef UvmGpuFbInfo gpuFbInfo;
typedef UvmGpuEccInfo gpuEccInfo;
typedef UvmGpuNvlinkInfo gpuNvlinkInfo;
typedef UvmGpuPagingChannel *gpuPagingChannelHandle;
typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo;
typedef UvmGpuPagingChannelAllocParams gpuPagingChannelAllocParams;
typedef UvmPmaAllocationOptions gpuPmaAllocationOptions;
typedef struct UvmCslIv
{
NvU8 iv[12];
@@ -1066,4 +1094,21 @@ typedef enum UvmCslOperation
UVM_CSL_OPERATION_DECRYPT
} UvmCslOperation;
typedef enum UVM_KEY_ROTATION_STATUS {
// Key rotation complete/not in progress
UVM_KEY_ROTATION_STATUS_IDLE = 0,
// RM is waiting for clients to report their channels are idle for key rotation
UVM_KEY_ROTATION_STATUS_PENDING = 1,
// Key rotation is in progress
UVM_KEY_ROTATION_STATUS_IN_PROGRESS = 2,
// Key rotation timeout failure, RM will RC non-idle channels.
// UVM should never see this status value.
UVM_KEY_ROTATION_STATUS_FAILED_TIMEOUT = 3,
// Key rotation failed because upper threshold was crossed, RM will RC non-idle channels
UVM_KEY_ROTATION_STATUS_FAILED_THRESHOLD = 4,
// Internal RM failure while rotating keys for a certain channel, RM will RC the channel.
UVM_KEY_ROTATION_STATUS_FAILED_ROTATION = 5,
UVM_KEY_ROTATION_STATUS_MAX_COUNT = 6,
} UVM_KEY_ROTATION_STATUS;
#endif // _NV_UVM_TYPES_H_

View File

@@ -0,0 +1,166 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//
// This file provides common types for the UVM kernel driver, UVM user layer,
// and RM's UVM interface.
//
#ifndef _NV_UVM_USER_TYPES_H_
#define _NV_UVM_USER_TYPES_H_
#include "nvtypes.h"
//
// Default page size is left "0" because in RM the BIG page size is the
// default and there are multiple BIG page sizes in RM. These defines are used
// as flags, so "0" should be OK when the user is not sure which page size
// allocation it wants.
//
#define UVM_PAGE_SIZE_DEFAULT 0x0ULL
#define UVM_PAGE_SIZE_4K 0x1000ULL
#define UVM_PAGE_SIZE_64K 0x10000ULL
#define UVM_PAGE_SIZE_128K 0x20000ULL
#define UVM_PAGE_SIZE_2M 0x200000ULL
#define UVM_PAGE_SIZE_512M 0x20000000ULL
#define UVM_PAGE_SIZE_256G 0x4000000000ULL
typedef enum
{
UVM_VIRT_MODE_NONE = 0, // Baremetal or passthrough virtualization
UVM_VIRT_MODE_LEGACY = 1, // Virtualization without SRIOV support
UVM_VIRT_MODE_SRIOV_HEAVY = 2, // Virtualization with SRIOV Heavy configured
UVM_VIRT_MODE_SRIOV_STANDARD = 3, // Virtualization with SRIOV Standard configured
UVM_VIRT_MODE_COUNT = 4,
} UVM_VIRT_MODE;
//------------------------------------------------------------------------------
// UVM GPU mapping types
//
// These types indicate the kinds of accesses allowed from a given GPU at the
// specified virtual address range. There are 3 basic kinds of accesses: read,
// write and atomics. Each type indicates what kinds of accesses are allowed.
// Accesses of any disallowed kind are fatal. The "Default" type specifies that
// the UVM driver should decide on the types of accesses allowed.
//------------------------------------------------------------------------------
typedef enum
{
UvmGpuMappingTypeDefault = 0,
UvmGpuMappingTypeReadWriteAtomic = 1,
UvmGpuMappingTypeReadWrite = 2,
UvmGpuMappingTypeReadOnly = 3,
UvmGpuMappingTypeCount = 4
} UvmGpuMappingType;
//------------------------------------------------------------------------------
// UVM GPU caching types
//
// These types indicate the cacheability of the specified virtual address range
// from a given GPU. The "Default" type specifies that the UVM driver should
// set caching on or off as required to follow the UVM coherence model. The
// "ForceUncached" and "ForceCached" types will always turn caching off or on
// respectively. These two types override the cacheability specified by the UVM
// coherence model.
//------------------------------------------------------------------------------
typedef enum
{
UvmGpuCachingTypeDefault = 0,
UvmGpuCachingTypeForceUncached = 1,
UvmGpuCachingTypeForceCached = 2,
UvmGpuCachingTypeCount = 3
} UvmGpuCachingType;
//------------------------------------------------------------------------------
// UVM GPU format types
//
// These types indicate the memory format of the specified virtual address
// range for a given GPU. The "Default" type specifies that the UVM driver will
// detect the format based on the allocation and is mutually inclusive with
// UvmGpuFormatElementBitsDefault.
//------------------------------------------------------------------------------
typedef enum {
UvmGpuFormatTypeDefault = 0,
UvmGpuFormatTypeBlockLinear = 1,
UvmGpuFormatTypeCount = 2
} UvmGpuFormatType;
//------------------------------------------------------------------------------
// UVM GPU Element bits types
//
// These types indicate the element size of the specified virtual address range
// for a given GPU. The "Default" type specifies that the UVM driver will
// detect the element size based on the allocation and is mutually inclusive
// with UvmGpuFormatTypeDefault. The element size is specified in bits:
// UvmGpuFormatElementBits8 uses the 8-bits format.
//------------------------------------------------------------------------------
typedef enum {
UvmGpuFormatElementBitsDefault = 0,
UvmGpuFormatElementBits8 = 1,
UvmGpuFormatElementBits16 = 2,
// CUDA does not support 24-bit width
UvmGpuFormatElementBits32 = 4,
UvmGpuFormatElementBits64 = 5,
UvmGpuFormatElementBits128 = 6,
UvmGpuFormatElementBitsCount = 7
} UvmGpuFormatElementBits;
//------------------------------------------------------------------------------
// UVM GPU Compression types
//
// These types indicate the compression type of the specified virtual address
// range for a given GPU. The "Default" type specifies that the UVM driver will
// detect the compression attributes based on the allocation. Any type other
// than the default will override the compression behavior of the physical
// allocation. UvmGpuCompressionTypeEnabledNoPlc will disable PLC but enables
// generic compression. UvmGpuCompressionTypeEnabledNoPlc type is only supported
// on Turing plus GPUs. Since UvmGpuCompressionTypeEnabledNoPlc type enables
// generic compression, it can only be used when the compression attribute of
// the underlying physical allocation is enabled.
//------------------------------------------------------------------------------
typedef enum {
UvmGpuCompressionTypeDefault = 0,
UvmGpuCompressionTypeEnabledNoPlc = 1,
UvmGpuCompressionTypeCount = 2
} UvmGpuCompressionType;
//
// Mirrored in PMA (PMA_STATS)
//
typedef struct UvmPmaStatistics_tag
{
volatile NvU64 numPages2m; // PMA-wide 2MB pages count across all regions
volatile NvU64 numFreePages64k; // PMA-wide free 64KB page count across all regions
volatile NvU64 numFreePages2m; // PMA-wide free 2MB pages count across all regions
volatile NvU64 numPages2mProtected; // PMA-wide 2MB pages count in protected memory
volatile NvU64 numFreePages64kProtected; // PMA-wide free 64KB page count in protected memory
volatile NvU64 numFreePages2mProtected; // PMA-wide free 2MB pages count in protected memory
} UvmPmaStatistics;
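/*
 * Illustration (not part of this header): because the statistics are page
 * counts, free memory in bytes follows directly from the 64K and 2M page
 * sizes defined above. The helper name is hypothetical.
 */
static inline NvU64 uvmExamplePmaFreeBytes(const UvmPmaStatistics *pPmaStats)
{
    return (pPmaStats->numFreePages64k * UVM_PAGE_SIZE_64K) +
           (pPmaStats->numFreePages2m  * UVM_PAGE_SIZE_2M);
}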
typedef enum
{
UVM_ACCESS_COUNTER_GRANULARITY_64K = 1,
UVM_ACCESS_COUNTER_GRANULARITY_2M = 2,
UVM_ACCESS_COUNTER_GRANULARITY_16M = 3,
UVM_ACCESS_COUNTER_GRANULARITY_16G = 4,
} UVM_ACCESS_COUNTER_GRANULARITY;
#endif // _NV_UVM_USER_TYPES_H_

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -69,10 +69,6 @@ typedef struct DRAM_CLK_INSTANCE {
// This table is used to collect information from other modules that is needed
// for RM IMP calculations. (Used on Tegra only.)
//
typedef struct TEGRA_IMP_IMPORT_DATA {
//
// max_iso_bw_kbps stores the maximum possible ISO bandwidth available to
@@ -82,7 +78,6 @@ typedef struct TEGRA_IMP_IMPORT_DATA {
//
NvU32 max_iso_bw_kbps;
NvU32 dram_type;
NvU32 num_dram_channels;

View File

@@ -47,6 +47,11 @@
#define NVKMS_MAX_SUPERFRAME_VIEWS 4
#define NVKMS_LOG2_LUT_ARRAY_SIZE 10
#define NVKMS_LUT_ARRAY_SIZE (1 << NVKMS_LOG2_LUT_ARRAY_SIZE)
#define NVKMS_OLUT_FP_NORM_SCALE_DEFAULT 0xffffffff
typedef NvU32 NvKmsDeviceHandle;
typedef NvU32 NvKmsDispHandle;
typedef NvU32 NvKmsConnectorHandle;
@@ -55,6 +60,7 @@ typedef NvU32 NvKmsFrameLockHandle;
typedef NvU32 NvKmsDeferredRequestFifoHandle;
typedef NvU32 NvKmsSwapGroupHandle;
typedef NvU32 NvKmsVblankSyncObjectHandle;
typedef NvU32 NvKmsVblankSemControlHandle;
struct NvKmsSize {
NvU16 width;
@@ -181,6 +187,14 @@ enum NvKmsEventType {
NVKMS_EVENT_TYPE_FLIP_OCCURRED,
};
enum NvKmsFlipResult {
NV_KMS_FLIP_RESULT_SUCCESS = 0, /* Success */
NV_KMS_FLIP_RESULT_INVALID_PARAMS, /* Parameter validation failed */
NV_KMS_FLIP_RESULT_IN_PROGRESS, /* Flip would fail because an outstanding
flip containing changes that cannot be
queued is in progress */
};
typedef enum {
NV_EVO_SCALER_1TAP = 0,
NV_EVO_SCALER_2TAPS = 1,
@@ -223,6 +237,90 @@ struct NvKmsUsageBounds {
} layer[NVKMS_MAX_LAYERS_PER_HEAD];
};
/*!
* Per-component arrays of NvU16s describing the LUT; used for both the input
* LUT and output LUT.
*/
struct NvKmsLutRamps {
NvU16 red[NVKMS_LUT_ARRAY_SIZE]; /*! in */
NvU16 green[NVKMS_LUT_ARRAY_SIZE]; /*! in */
NvU16 blue[NVKMS_LUT_ARRAY_SIZE]; /*! in */
};
/* Datatypes for LUT capabilities */
enum NvKmsLUTFormat {
/*
* Normalized fixed-point format mapping [0, 1] to [0x0, 0xFFFF].
*/
NVKMS_LUT_FORMAT_UNORM16,
/*
* Half-precision floating point.
*/
NVKMS_LUT_FORMAT_FP16,
/*
* 14-bit fixed-point format required to work around hardware bug 813188.
*
* To convert from UNORM16 to UNORM14_WAR_813188:
* unorm14_war_813188 = ((unorm16 >> 2) & ~7) + 0x6000
*/
NVKMS_LUT_FORMAT_UNORM14_WAR_813188
};
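/*
 * Illustration (not part of this header): the UNORM14_WAR_813188 comment
 * above fully specifies the conversion; expressed as a helper, with the
 * function name being hypothetical:
 */
static inline NvU16 nvKmsExampleUnorm16ToUnorm14War813188(NvU16 unorm16)
{
    /* Apply the documented workaround transform. */
    return (NvU16)((((NvU32)unorm16 >> 2) & ~7U) + 0x6000U);
}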
enum NvKmsLUTVssSupport {
NVKMS_LUT_VSS_NOT_SUPPORTED,
NVKMS_LUT_VSS_SUPPORTED,
NVKMS_LUT_VSS_REQUIRED,
};
enum NvKmsLUTVssType {
NVKMS_LUT_VSS_TYPE_NONE,
NVKMS_LUT_VSS_TYPE_LINEAR,
NVKMS_LUT_VSS_TYPE_LOGARITHMIC,
};
struct NvKmsLUTCaps {
/*! Whether this layer or head on this device supports this LUT stage. */
NvBool supported;
/*! Whether this LUT supports VSS. */
enum NvKmsLUTVssSupport vssSupport;
/*!
* The type of VSS segmenting this LUT uses.
*/
enum NvKmsLUTVssType vssType;
/*!
* Expected number of VSS segments.
*/
NvU32 vssSegments;
/*!
* Expected number of LUT entries.
*/
NvU32 lutEntries;
/*!
* Format for each of the LUT entries.
*/
enum NvKmsLUTFormat entryFormat;
};
/* each LUT entry uses this many bytes */
#define NVKMS_LUT_CAPS_LUT_ENTRY_SIZE (4 * sizeof(NvU16))
/* if the LUT surface uses VSS, size of the VSS header */
#define NVKMS_LUT_VSS_HEADER_SIZE (4 * NVKMS_LUT_CAPS_LUT_ENTRY_SIZE)
struct NvKmsLUTSurfaceParams {
NvKmsSurfaceHandle surfaceHandle;
NvU64 offset NV_ALIGN_BYTES(8);
NvU32 vssSegments;
NvU32 lutEntries;
};
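/*
 * Illustration (not part of this header): a plausible sizing helper for the
 * LUT surface described by NvKmsLUTSurfaceParams, assuming the surface
 * carries a VSS header exactly when vssSegments is non-zero. The helper name
 * is hypothetical.
 */
static inline NvU64 nvKmsExampleLutSurfaceBytes(
    const struct NvKmsLUTSurfaceParams *p)
{
    NvU64 size = (NvU64)p->lutEntries * NVKMS_LUT_CAPS_LUT_ENTRY_SIZE;

    if (p->vssSegments != 0) {
        /* VSS surfaces are prefixed by a fixed-size header. */
        size += NVKMS_LUT_VSS_HEADER_SIZE;
    }
    return size;
}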
/*
* A 3x4 row-major colorspace conversion matrix.
*
@@ -418,9 +516,9 @@ struct NvKmsLayerCapabilities {
NvBool supportsWindowMode :1;
/*!
* Whether layer supports ICtCp pipe.
*/
NvBool supportsICtCp :1;
/*!
@@ -441,6 +539,10 @@ struct NvKmsLayerCapabilities {
* still expected to honor the NvKmsUsageBounds for each head.
*/
NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8);
/* Capabilities for each LUT stage in the EVO3 precomp pipeline. */
struct NvKmsLUTCaps ilut;
struct NvKmsLUTCaps tmo;
};
/*!
@@ -538,22 +640,34 @@ enum NvKmsInputColorRange {
* If DEFAULT is provided, driver will assume full range for RGB formats
* and limited range for YUV formats.
*/
NVKMS_INPUT_COLOR_RANGE_DEFAULT = 0,
NVKMS_INPUT_COLOR_RANGE_LIMITED = 1,
NVKMS_INPUT_COLOR_RANGE_FULL = 2,
};
enum NvKmsInputColorSpace {
/* Unknown colorspace */
NVKMS_INPUT_COLOR_SPACE_NONE = 0,
NVKMS_INPUT_COLOR_SPACE_BT601 = 1,
NVKMS_INPUT_COLOR_SPACE_BT709 = 2,
NVKMS_INPUT_COLOR_SPACE_BT2020 = 3,
NVKMS_INPUT_COLOR_SPACE_BT2100 = NVKMS_INPUT_COLOR_SPACE_BT2020,
NVKMS_INPUT_COLOR_SPACE_SCRGB = 4
};
enum NvKmsInputTf {
NVKMS_INPUT_TF_LINEAR = 0,
NVKMS_INPUT_TF_PQ = 1
};
enum NvKmsOutputColorimetry {
NVKMS_OUTPUT_COLORIMETRY_DEFAULT = 0,
NVKMS_OUTPUT_COLORIMETRY_BT2100 = 1,
};
enum NvKmsOutputTf {
@@ -566,6 +680,17 @@ enum NvKmsOutputTf {
NVKMS_OUTPUT_TF_PQ = 2,
};
/*!
* EOTF Data Byte 1 as per CTA-861-G spec.
* This is expected to match exactly with the spec.
*/
enum NvKmsInfoFrameEOTF {
NVKMS_INFOFRAME_EOTF_SDR_GAMMA = 0,
NVKMS_INFOFRAME_EOTF_HDR_GAMMA = 1,
NVKMS_INFOFRAME_EOTF_ST2084 = 2,
NVKMS_INFOFRAME_EOTF_HLG = 3,
};
/*!
* HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec.
* This is expected to match exactly with the spec.
@@ -644,4 +769,20 @@ struct NvKmsSuperframeInfo {
} view[NVKMS_MAX_SUPERFRAME_VIEWS];
};
/* Fields within NvKmsVblankSemControlDataOneHead::flags */
#define NVKMS_VBLANK_SEM_CONTROL_SWAP_INTERVAL 15:0
struct NvKmsVblankSemControlDataOneHead {
NvU32 requestCounterAccel;
NvU32 requestCounter;
NvU32 flags;
NvU32 semaphore;
NvU64 vblankCount NV_ALIGN_BYTES(8);
};
struct NvKmsVblankSemControlData {
struct NvKmsVblankSemControlDataOneHead head[NV_MAX_HEADS];
};
#endif /* NVKMS_API_TYPES_H */

View File

@@ -24,8 +24,10 @@
#if !defined(__NVKMS_KAPI_H__)
#include "nvtypes.h"
#include "nv_mig_types.h"
#include "nv-gpu-info.h"
#include "nv_dpy_id.h"
#include "nvkms-api-types.h"
#include "nvkms-format.h"
@@ -124,6 +126,14 @@ struct NvKmsKapiDisplayMode {
#define NVKMS_KAPI_LAYER_INVALID_IDX 0xff
#define NVKMS_KAPI_LAYER_PRIMARY_IDX 0
struct NvKmsKapiLutCaps {
struct {
struct NvKmsLUTCaps ilut;
struct NvKmsLUTCaps tmo;
} layer[NVKMS_KAPI_LAYER_MAX];
struct NvKmsLUTCaps olut;
};
struct NvKmsKapiDeviceResourcesInfo {
NvU32 numHeads;
@@ -158,13 +168,25 @@ struct NvKmsKapiDeviceResourcesInfo {
NvU32 hasVideoMemory;
NvU32 numDisplaySemaphores;
NvU8 genericPageKind;
NvBool supportsSyncpts;
NvBool requiresVrrSemaphores;
NvBool supportsInputColorRange;
NvBool supportsInputColorSpace;
} caps;
NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX];
NvBool supportsICtCp[NVKMS_KAPI_LAYER_MAX];
struct NvKmsKapiLutCaps lutCaps;
NvU64 vtFbBaseAddress;
NvU64 vtFbSize;
};
#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType))
@@ -190,6 +212,7 @@ struct NvKmsKapiConnectorInfo {
NvU32 numIncompatibleConnectors;
NvKmsKapiConnector incompatibleConnectorHandles[NVKMS_KAPI_MAX_CONNECTORS];
NVDpyIdList dynamicDpyIdList;
};
struct NvKmsKapiStaticDisplayInfo {
@@ -208,20 +231,30 @@ struct NvKmsKapiStaticDisplayInfo {
NvKmsKapiDisplay possibleCloneHandles[NVKMS_KAPI_MAX_CLONE_DISPLAYS];
NvU32 headMask;
NvBool isDpMST;
};
struct NvKmsKapiSyncParams {
union {
struct {
/*!
* Possible syncpt use case in kapi.
* For pre-syncpt, use only id and value
* and for post-syncpt, use only fd.
*/
NvU32 preSyncptId;
NvU32 preSyncptValue;
} syncpt;
struct {
NvU32 index;
} semaphore;
} u;
NvBool preSyncptSpecified;
NvBool postSyncptRequested;
NvBool semaphoreSpecified;
};
struct NvKmsKapiLayerConfig {
@@ -231,12 +264,15 @@ struct NvKmsKapiLayerConfig {
NvU8 surfaceAlpha;
} compParams;
struct NvKmsRRParams rrParams;
struct NvKmsKapiSyncParams syncParams;
struct {
struct NvKmsHDRStaticMetadata val;
NvBool enabled;
} hdrMetadata;
enum NvKmsInputTf inputTf;
enum NvKmsOutputTf outputTf;
NvU8 minPresentInterval;
NvBool tearing;
@@ -248,16 +284,58 @@ struct NvKmsKapiLayerConfig {
NvU16 dstWidth, dstHeight;
enum NvKmsInputColorSpace inputColorSpace;
enum NvKmsInputColorRange inputColorRange;
struct {
NvBool enabled;
struct NvKmsKapiSurface *lutSurface;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
} ilut;
struct {
NvBool enabled;
struct NvKmsKapiSurface *lutSurface;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
} tmo;
struct NvKmsCscMatrix csc;
NvBool cscUseMain;
struct {
struct NvKmsCscMatrix lmsCtm;
struct NvKmsCscMatrix lmsToItpCtm;
struct NvKmsCscMatrix itpToLmsCtm;
struct NvKmsCscMatrix blendCtm;
struct {
NvBool lmsCtm : 1;
NvBool lmsToItpCtm : 1;
NvBool itpToLmsCtm : 1;
NvBool blendCtm : 1;
} enabled;
} matrixOverrides;
};
struct NvKmsKapiLayerRequestedConfig {
struct NvKmsKapiLayerConfig config;
struct {
NvBool surfaceChanged : 1;
NvBool srcXYChanged : 1;
NvBool srcWHChanged : 1;
NvBool dstXYChanged : 1;
NvBool dstWHChanged : 1;
NvBool cscChanged : 1;
NvBool inputTfChanged : 1;
NvBool outputTfChanged : 1;
NvBool inputColorSpaceChanged : 1;
NvBool inputColorRangeChanged : 1;
NvBool hdrMetadataChanged : 1;
NvBool matrixOverridesChanged : 1;
NvBool ilutChanged : 1;
NvBool tmoChanged : 1;
} flags;
};
@@ -301,14 +379,52 @@ struct NvKmsKapiHeadModeSetConfig {
struct NvKmsKapiDisplayMode mode;
NvBool vrrEnabled;
struct {
NvBool enabled;
enum NvKmsInfoFrameEOTF eotf;
struct NvKmsHDRStaticMetadata staticMetadata;
} hdrInfoFrame;
enum NvKmsOutputColorimetry colorimetry;
struct {
struct {
NvU32 depth;
NvU32 start;
NvU32 end;
struct NvKmsLutRamps *pRamps;
} input;
struct {
NvBool enabled;
struct NvKmsLutRamps *pRamps;
} output;
} lut;
struct {
NvBool enabled;
struct NvKmsKapiSurface *lutSurface;
NvU64 offset;
NvU32 vssSegments;
NvU32 lutEntries;
} olut;
NvU32 olutFpNormScale;
};
struct NvKmsKapiHeadRequestedConfig {
struct NvKmsKapiHeadModeSetConfig modeSetConfig;
struct {
NvBool activeChanged : 1;
NvBool displaysChanged : 1;
NvBool modeChanged : 1;
NvBool hdrInfoFrameChanged : 1;
NvBool colorimetryChanged : 1;
NvBool legacyIlutChanged : 1;
NvBool legacyOlutChanged : 1;
NvBool olutChanged : 1;
NvBool olutFpNormScaleChanged : 1;
} flags;
struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig;
@@ -333,6 +449,9 @@ struct NvKmsKapiHeadReplyConfig {
};
struct NvKmsKapiModeSetReplyConfig {
enum NvKmsFlipResult flipResult;
NvBool vrrFlip;
NvS32 vrrSemaphoreIndex;
struct NvKmsKapiHeadReplyConfig
headReplyConfig[NVKMS_KAPI_MAX_HEADS];
};
@@ -378,6 +497,8 @@ struct NvKmsKapiEvent {
struct NvKmsKapiAllocateDeviceParams {
/* [IN] GPU ID obtained from enumerateGpus() */
NvU32 gpuId;
/* [IN] MIG device if requested */
MIGDeviceId migDevice;
/* [IN] Private data of device allocator */
void *privateData;
@@ -449,12 +570,41 @@ enum NvKmsKapiAllocationType {
NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN = 2,
};
struct NvKmsKapiAllocateMemoryParams {
/* [IN] BlockLinear or Pitch */
enum NvKmsSurfaceMemoryLayout layout;
/* [IN] Allocation type */
enum NvKmsKapiAllocationType type;
/* [IN] Size, in bytes, of the memory to allocate */
NvU64 size;
/* [IN] Whether memory can be updated directly on the screen */
NvBool noDisplayCaching;
/* [IN] Whether to allocate memory from video memory or system memory */
NvBool useVideoMemory;
/* [IN/OUT] For input, non-zero if compression backing store should be
* allocated for the memory, for output, non-zero if compression backing
* store was allocated for the memory */
NvU8 *compressible;
};
typedef enum NvKmsKapiRegisterWaiterResultRec {
NVKMS_KAPI_REG_WAITER_FAILED,
NVKMS_KAPI_REG_WAITER_SUCCESS,
NVKMS_KAPI_REG_WAITER_ALREADY_SIGNALLED,
} NvKmsKapiRegisterWaiterResult;
typedef void NvKmsKapiSuspendResumeCallbackFunc(NvBool suspend);
struct NvKmsKapiGpuInfo {
nv_gpu_info_t gpuInfo;
MIGDeviceId migDevice;
};
struct NvKmsKapiFunctionsTable {
/*!
@@ -471,14 +621,19 @@ struct NvKmsKapiFunctionsTable {
} systemInfo;
/*!
* Enumerate the available GPUs that can be used with NVKMS.
*
* The gpuCallback will be called with a NvKmsKapiGpuInfo for each
* physical and MIG GPU currently available in the system.
*
* \param [in] gpuCallback Client function to handle each GPU.
*
* \return Count of enumerated gpus.
*/
NvU32 (*enumerateGpus)
(
void (*gpuCallback)(const struct NvKmsKapiGpuInfo *info)
);
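/*
 * Illustration (client-side sketch, not part of this table): enumerating
 * GPUs under the callback-based contract. The example* names are
 * hypothetical.
 */
static void exampleGpuCallback(const struct NvKmsKapiGpuInfo *info)
{
    /* Record info->gpuInfo and info->migDevice for later device allocation. */
}

static NvU32 exampleEnumerate(struct NvKmsKapiFunctionsTable *kapi)
{
    return kapi->enumerateGpus(exampleGpuCallback);
}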
/*!
* Allocate an NVK device using which you can query/allocate resources on
@@ -540,8 +695,8 @@ struct NvKmsKapiFunctionsTable {
);
/*!
* Revoke modeset permissions previously granted. Only one (dispIndex,
* head, display) is currently supported.
*
* \param [in] device A device returned by allocateDevice().
*
@@ -558,6 +713,34 @@ struct NvKmsKapiFunctionsTable {
NvKmsKapiDisplay display
);
/*!
* Grant modeset sub-owner permissions to fd. This is used by clients to
* convert drm 'master' permissions into nvkms sub-owner permission.
*
* \param [in] fd fd from opening /dev/nvidia-modeset.
*
* \param [in] device A device returned by allocateDevice().
*
* \return NV_TRUE on success, NV_FALSE on failure.
*/
NvBool (*grantSubOwnership)
(
NvS32 fd,
struct NvKmsKapiDevice *device
);
/*!
* Revoke sub-owner permissions previously granted.
*
* \param [in] device A device returned by allocateDevice().
*
* \return NV_TRUE on success, NV_FALSE on failure.
*/
NvBool (*revokeSubOwnership)
(
struct NvKmsKapiDevice *device
);
/*!
* Registers for notification, via
* NvKmsKapiAllocateDeviceParams::eventCallback, of the events specified
@@ -680,66 +863,22 @@ struct NvKmsKapiFunctionsTable {
);
/*!
* Allocate some unformatted video or system memory of the specified size.
*
* This function allocates video or system memory on the specified GPU. It
* should be suitable for mapping on the CPU as a pitch linear or
* block-linear surface.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in/out] params Parameters required for memory allocation.
*
* \return A valid memory handle on success, NULL on failure.
*/
struct NvKmsKapiMemory* (*allocateMemory)
(
struct NvKmsKapiDevice *device,
struct NvKmsKapiAllocateMemoryParams *params
);
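/*
 * Illustration (client-side sketch, not part of this table): allocating
 * 1 MiB of pitch-linear video memory through the unified entry point.
 * 'kapi' and 'device' are assumed to have been set up elsewhere; all
 * values are arbitrary examples.
 */
static struct NvKmsKapiMemory *exampleAllocate(
    struct NvKmsKapiFunctionsTable *kapi,
    struct NvKmsKapiDevice *device)
{
    NvU8 compressible = 0;  /* no compression backing store requested */
    struct NvKmsKapiAllocateMemoryParams params = {
        .layout           = NvKmsSurfaceMemoryLayoutPitch,
        .type             = NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN,
        .size             = 1 << 20,
        .noDisplayCaching = NV_FALSE,
        .useVideoMemory   = NV_TRUE,
        .compressible     = &compressible,
    };

    return kapi->allocateMemory(device, &params);
}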
/*!
@@ -878,6 +1017,17 @@ struct NvKmsKapiFunctionsTable {
const void *pLinearAddress
);
/*!
* Check if the allocated memory object is video memory.
*
* \param [in] memory Memory allocated using allocateMemory()
*
* \return NV_TRUE if memory is vidmem, NV_FALSE otherwise.
*/
NvBool (*isVidmem)(
const struct NvKmsKapiMemory *memory
);
/*!
* Create a formatted surface from an NvKmsKapiMemory object.
*
@@ -1094,21 +1244,6 @@ struct NvKmsKapiFunctionsTable {
NvU64 *pPages
);
/*!
* Check if this memory object can be scanned out for display.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] memory The memory object to check for display support.
*
* \return NV_TRUE if this memory can be displayed, NV_FALSE if not.
*/
NvBool (*isMemoryValidForDisplay)
(
const struct NvKmsKapiDevice *device,
const struct NvKmsKapiMemory *memory
);
/*
* Import SGT as a memory handle.
*
@@ -1336,6 +1471,126 @@ struct NvKmsKapiFunctionsTable {
NvU64 index,
NvU64 new_value
);
/*!
* Set the callback function for suspending and resuming the display system.
*/
void
(*setSuspendResumeCallback)
(
NvKmsKapiSuspendResumeCallbackFunc *function
);
/*!
* Immediately initialize the specified display semaphore to the pending state.
*
* Must be called prior to applying a mode set that utilizes the specified
* display semaphore for synchronization.
*
* \param [in] device The device which will utilize the semaphore.
*
* \param [in] semaphoreIndex Index of the desired semaphore within the
* NVKMS semaphore pool. Must be less than
* NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
* for the specified device.
*/
NvBool
(*tryInitDisplaySemaphore)
(
struct NvKmsKapiDevice *device,
NvU32 semaphoreIndex
);
/*!
* Immediately set the specified display semaphore to the displayable state.
*
* Must be called after \ref tryInitDisplaySemaphore to indicate a mode
* configuration change that utilizes the specified display semaphore for
* synchronization may proceed.
*
* \param [in] device The device which will utilize the semaphore.
*
* \param [in] semaphoreIndex Index of the desired semaphore within the
* NVKMS semaphore pool. Must be less than
* NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
* for the specified device.
*/
void
(*signalDisplaySemaphore)
(
struct NvKmsKapiDevice *device,
NvU32 semaphoreIndex
);
/*!
* Immediately cancel use of a display semaphore by resetting its value to
* its initial state.
*
* This can be used by clients to restore a semaphore to a consistent state
* when they have prepared it for use by previously calling
* \ref tryInitDisplaySemaphore() on it, but are then prevented from
* submitting the associated hardware operations to consume it due to the
* subsequent failure of some software or hardware operation.
*
* \param [in] device The device which will utilize the semaphore.
*
* \param [in] semaphoreIndex Index of the desired semaphore within the
* NVKMS semaphore pool. Must be less than
* NvKmsKapiDeviceResourcesInfo::caps::numDisplaySemaphores
* for the specified device.
*/
void
(*cancelDisplaySemaphore)
(
struct NvKmsKapiDevice *device,
NvU32 semaphoreIndex
);
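/*
 * Illustration (client-side sketch, not part of this table): the intended
 * lifecycle of one display semaphore. examplePrepareModeSet stands in for a
 * hypothetical client step that submits the associated hardware operations.
 */
static NvBool examplePrepareModeSet(struct NvKmsKapiDevice *device);

static NvBool exampleUseDisplaySemaphore(
    struct NvKmsKapiFunctionsTable *kapi,
    struct NvKmsKapiDevice *device,
    NvU32 semIndex)
{
    /* Put the semaphore in the pending state before the mode set. */
    if (!kapi->tryInitDisplaySemaphore(device, semIndex)) {
        return NV_FALSE;
    }

    if (!examplePrepareModeSet(device)) {
        /* A later step failed: restore the semaphore's initial state. */
        kapi->cancelDisplaySemaphore(device, semIndex);
        return NV_FALSE;
    }

    /* Mark the semaphore displayable so the configuration may proceed. */
    kapi->signalDisplaySemaphore(device, semIndex);
    return NV_TRUE;
}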
/*!
* Signal the VRR semaphore at the specified index from the CPU.
* If the device does not support VRR semaphores, this is a no-op.
* Returns NV_TRUE if the signal succeeds or is a no-op, NV_FALSE otherwise.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] index The VRR semaphore index to be signalled.
*/
NvBool
(*signalVrrSemaphore)
(
struct NvKmsKapiDevice *device,
NvS32 index
);
/*!
* Check or wait on a head's LUT notifier.
*
* \param [in] device A device allocated using allocateDevice().
*
* \param [in] head The head to check for LUT completion.
*
* \param [in] waitForCompletion If true, wait for the notifier in NvKms
* before returning.
*
* \return Whether the notifier has completed.
*/
NvBool
(*checkLutNotifier)
(
struct NvKmsKapiDevice *device,
NvU32 head,
NvBool waitForCompletion
);
/*
* Notify NVKMS that the system's framebuffer console has been disabled and
* the reserved allocation for the old framebuffer console can be unmapped.
*/
void
(*framebufferConsoleDisabled)
(
struct NvKmsKapiDevice *device
);
};
/** @} */
@@ -1350,6 +1605,20 @@ NvBool nvKmsKapiGetFunctionsTable
struct NvKmsKapiFunctionsTable *funcsTable
);
NvU32 nvKmsKapiF16ToF32(NvU16 a);
NvU16 nvKmsKapiF32ToF16(NvU32 a);
NvU32 nvKmsKapiF32Mul(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32Div(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32Add(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32ToUI32RMinMag(NvU32 a, NvBool exact);
NvU32 nvKmsKapiUI32ToF32(NvU32 a);
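/*
 * Illustration (not part of this header): assuming these helpers follow
 * IEEE-754 half/single encodings, converting the half-precision bit pattern
 * for 1.0 (0x3C00) should yield the single-precision pattern 0x3F800000.
 */
static inline NvU32 nvKmsExampleHalfOneToF32(void)
{
    return nvKmsKapiF16ToF32(0x3C00); /* expect 0x3F800000, i.e. 1.0f */
}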
/** @} */
#endif /* defined(__NVKMS_KAPI_H__) */

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,19 +34,25 @@
/*
* This is the maximum number of GPUs supported in a single system.
*/
#define NV_MAX_DEVICES 32
/*
* This is the maximum number of subdevices within a single device.
*/
#define NV_MAX_SUBDEVICES 8
/*
* This is the maximum length of the process name string.
*/
#define NV_PROC_NAME_MAX_LENGTH 100U
/*
* This is the maximum number of heads per GPU.
*/
#define NV_MAX_HEADS 4
/*
* Maximum length of a MIG device UUID. It is a 36-byte UUID string plus a
* 4-byte prefix and a NUL terminator: 'M' 'I' 'G' '-' UUID '\0'
*/
#define NV_MIG_DEVICE_UUID_STR_LENGTH 41U

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -33,43 +33,26 @@ extern "C" {
#include "nvtypes.h"
// Miscellaneous macros useful for bit field manipulations.
#ifndef NVBIT
#define NVBIT(b) (1U<<(b))
#endif
#ifndef NVBIT_TYPE
#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
#endif
#ifndef NVBIT32
#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
#endif
#ifndef NVBIT64
#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
#endif
// Concatenate two 32-bit values into a 64-bit value
#define NV_CONCAT_32_TO_64(hi, lo) ((((NvU64)hi) << 32) | ((NvU64)lo))
// Helper macros for 32-bit bitmasks
#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3)
#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F))
#define NV_BITMASK32_SET(pChannelMask, chId) \
(pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId))
@@ -494,6 +477,23 @@ do \
//
#define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1)
//
// Create a 64b bitmask with n bits set
// This is the same as ((1ULL<<n) - 1), but it doesn't overflow for n=64
//
// ...
// n=-1, 0x0000000000000000
// n=0, 0x0000000000000000
// n=1, 0x0000000000000001
// ...
// n=63, 0x7FFFFFFFFFFFFFFF
// n=64, 0xFFFFFFFFFFFFFFFF
// n=65, 0xFFFFFFFFFFFFFFFF
// n=66, 0xFFFFFFFFFFFFFFFF
// ...
//
#define NV_BITMASK64(n) ((n<1) ? 0ULL : (NV_U64_MAX>>((n>64) ? 0 : (64-n))))
#define DRF_READ_1WORD_BS(d,r,f,v) \
((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \
((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \
@@ -574,6 +574,12 @@ nvMaskPos32(const NvU32 mask, const NvU32 bitIdx)
n32 = BIT_IDX_32(LOWESTBIT(n32));\
}
// Destructive operation on n64
#define LOWESTBITIDX_64(n64) \
{ \
n64 = BIT_IDX_64(LOWESTBIT(n64));\
}
// Destructive operation on n32
#define HIGHESTBITIDX_32(n32) \
{ \
@@ -585,6 +591,17 @@ nvMaskPos32(const NvU32 mask, const NvU32 bitIdx)
n32 = count; \
}
// Destructive operation on n64
#define HIGHESTBITIDX_64(n64) \
{ \
NvU64 count = 0; \
while (n64 >>= 1) \
{ \
count++; \
} \
n64 = count; \
}
// Destructive operation on n32
#define ROUNDUP_POW2(n32) \
{ \
@@ -694,6 +711,35 @@ nvPrevPow2_U64(const NvU64 x )
} \
}
/*!
* Returns the position of nth set bit in the given mask.
*
* Returns -1 if mask has fewer than n bits set.
*
* n is 0 indexed and has valid values 0..31 inclusive, so "zeroth" set bit is
* the first set LSB.
*
* Example, if mask = 0x000000F0u and n = 1, the return value will be 5.
* Example, if mask = 0x000000F0u and n = 4, the return value will be -1.
*/
static NV_FORCEINLINE NvS32
nvGetNthSetBitIndex32(NvU32 mask, NvU32 n)
{
NvU32 seenSetBitsCount = 0;
NvS32 index;
FOR_EACH_INDEX_IN_MASK(32, index, mask)
{
if (seenSetBitsCount == n)
{
return index;
}
++seenSetBitsCount;
}
FOR_EACH_INDEX_IN_MASK_END;
return -1;
}
//
// Size to use when declaring variable-sized arrays
//
@@ -737,12 +783,15 @@ nvPrevPow2_U64(const NvU64 x )
// Returns the offset (in bytes) of 'member' in struct 'type'.
#ifndef NV_OFFSETOF
#if defined(__GNUC__) && (__GNUC__ > 3)
#define NV_OFFSETOF(type, member) ((NvUPtr) __builtin_offsetof(type, member))
#else
#define NV_OFFSETOF(type, member) ((NvUPtr) &(((type *)0)->member))
#endif
#endif
// Given a pointer to a member of a struct, return a pointer to the containing parent struct
#define NV_CONTAINEROF(ptr, type, member) ((type *) (((NvUPtr) ptr) - NV_OFFSETOF(type, member)))
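/*
 * Illustration (not part of this header): NV_CONTAINEROF recovers a parent
 * struct from a pointer to one of its members, like the Linux kernel's
 * container_of(). The Example* names are hypothetical.
 */
struct ExampleLink { struct ExampleLink *next; };
struct ExampleNode {
    NvU32 payload;
    struct ExampleLink link;
};

static inline struct ExampleNode *exampleNodeFromLink(struct ExampleLink *link)
{
    return NV_CONTAINEROF(link, struct ExampleNode, link);
}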
//
// Performs a rounded division of b into a (unsigned). For SIGNED version of
// NV_ROUNDED_DIV() macro check the comments in bug 769777.
@@ -918,6 +967,30 @@ static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
// Use (lo) if (b) is less than 64, and (hi) if >= 64.
//
#define NV_BIT_SET_128(b, lo, hi) { nvAssert( (b) < 128 ); if ( (b) < 64 ) (lo) |= NVBIT64(b); else (hi) |= NVBIT64( b & 0x3F ); }
//
// Clear the bit at pos (b) for U64 which is < 128.
// Use (lo) if (b) is less than 64, and (hi) if >= 64.
//
#define NV_BIT_CLEAR_128(b, lo, hi) { nvAssert( (b) < 128 ); if ( (b) < 64 ) (lo) &= ~NVBIT64(b); else (hi) &= ~NVBIT64( b & 0x3F ); }
// Get the number of elements in the specified fixed-size array
#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0])))
#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
//
// Deprecated macros whose definition can be removed once the code base no longer references them.
// Use the NVBIT* macros instead of these macros.
//
#ifndef BIT
#define BIT(b) (1U<<(b))
#endif
#ifndef BIT32
#define BIT32(b) ((NvU32)1U<<(b))
#endif
#ifndef BIT64
#define BIT64(b) ((NvU64)1U<<(b))
#endif
#endif
#ifdef __cplusplus
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2014-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -150,6 +150,21 @@ NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Confi
NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt")
NV_STATUS_CODE(NV_ERR_FABRIC_MANAGER_NOT_PRESENT, 0x0000007A, "Fabric Manager is not loaded")
NV_STATUS_CODE(NV_ERR_ALREADY_SIGNALLED, 0x0000007B, "Semaphore Surface value already >= requested wait value")
NV_STATUS_CODE(NV_ERR_QUEUE_TASK_SLOT_NOT_AVAILABLE, 0x0000007C, "PMU RPC error due to no queue slot available for this event")
NV_STATUS_CODE(NV_ERR_KEY_ROTATION_IN_PROGRESS, 0x0000007D, "Operation not allowed as key rotation is in progress")
NV_STATUS_CODE(NV_ERR_TEST_ONLY_CODE_NOT_ENABLED, 0x0000007E, "Test-only code path not enabled")
NV_STATUS_CODE(NV_ERR_SECURE_BOOT_FAILED, 0x0000007F, "GFW secure boot failed")
NV_STATUS_CODE(NV_ERR_INSUFFICIENT_ZBC_ENTRY, 0x00000080, "No more ZBC entry for the client")
NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_NOT_READY, 0x00000081, "Nvlink Fabric Status or Fabric Probe is not yet complete, caller needs to retry")
NV_STATUS_CODE(NV_ERR_NVLINK_FABRIC_FAILURE, 0x00000082, "Nvlink Fabric Probe failed")
NV_STATUS_CODE(NV_ERR_GPU_MEMORY_ONLINING_FAILURE, 0x00000083, "GPU Memory Onlining failed")
NV_STATUS_CODE(NV_ERR_REDUCTION_MANAGER_NOT_AVAILABLE, 0x00000084, "Reduction Manager is not available")
NV_STATUS_CODE(NV_ERR_THRESHOLD_CROSSED, 0x00000085, "A fatal threshold has been crossed")
NV_STATUS_CODE(NV_ERR_RESOURCE_RETIREMENT_ERROR, 0x00000086, "An error occurred while trying to retire a resource")
NV_STATUS_CODE(NV_ERR_FABRIC_STATE_OUT_OF_SYNC, 0x00000087, "NVLink fabric state cached by the driver is out of sync")
NV_STATUS_CODE(NV_ERR_BUFFER_FULL, 0x00000088, "Buffer is full")
NV_STATUS_CODE(NV_ERR_BUFFER_EMPTY, 0x00000089, "Buffer is empty")
NV_STATUS_CODE(NV_ERR_MC_FLA_OFFSET_TABLE_FULL, 0x0000008A, "Multicast FLA offset table has no available slots")
// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")
@@ -160,5 +175,6 @@ NV_STATUS_CODE(NV_WARN_MORE_PROCESSING_REQUIRED, 0x00010005, "WARNING More
NV_STATUS_CODE(NV_WARN_NOTHING_TO_DO, 0x00010006, "WARNING Nothing to do")
NV_STATUS_CODE(NV_WARN_NULL_OBJECT, 0x00010007, "WARNING NULL object found")
NV_STATUS_CODE(NV_WARN_OUT_OF_RANGE, 0x00010008, "WARNING value out of range")
NV_STATUS_CODE(NV_WARN_THRESHOLD_CROSSED, 0x00010009, "WARNING Threshold has been crossed")
#endif /* SDK_NVSTATUSCODES_H */

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1993-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,10 +24,6 @@
#ifndef NVTYPES_INCLUDED
#define NVTYPES_INCLUDED
#include "cpuopsys.h"
#ifndef NVTYPES_USE_STDINT
@@ -55,6 +51,10 @@ extern "C" {
#endif
#endif // __cplusplus
#ifdef __cplusplus
extern "C" {
#endif
#if defined(MAKE_NV64TYPES_8BYTES_ALIGNED) && defined(__i386__)
// ensure or force 8-bytes alignment of NV 64-bit types
#define OPTIONAL_ALIGN8_ATTR __attribute__((aligned(8)))
@@ -145,7 +145,18 @@ typedef signed short NvS16; /* -32768 to 32767 */
#endif
// Macro to build an NvU32 from four bytes, listed from msb to lsb
#define NvU32_BUILD(a, b, c, d) \
((NvU32)( \
(((NvU32)(a) & 0xff) << 24) | \
(((NvU32)(b) & 0xff) << 16) | \
(((NvU32)(c) & 0xff) << 8) | \
(((NvU32)(d) & 0xff))))
// Macro to build an NvU64 from two DWORDS, listed from msb to lsb
#define NvU64_BUILD(a, b) \
((NvU64)( \
(((NvU64)(a) & ~0U) << 32) | \
(((NvU64)(b) & ~0U))))
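/*
 * Illustration (not part of this header): example expansions of the masked
 * build macros; the masking keeps oversized inputs from corrupting
 * neighboring fields. 'N' is 0x4E and 'V' is 0x56.
 */
static const NvU32 nvExampleFourCC = NvU32_BUILD('N', 'V', 0x01, 0x02);
    /* == 0x4E560102 */
static const NvU64 nvExampleQword = NvU64_BUILD(0x01234567u, 0x89ABCDEFu);
    /* == 0x0123456789ABCDEF */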
#if NVTYPES_USE_STDINT
typedef uint32_t NvV32; /* "void": enumerated or multiple fields */

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -40,8 +40,11 @@
#include "nv_stdarg.h"
#include <nv-kernel-interface-api.h>
#include <os/nv_memory_type.h>
#include <os/nv_memory_area.h>
#include <nv-caps.h>
#include "rs_access.h"
typedef struct
@@ -67,148 +70,175 @@ typedef struct os_wait_queue os_wait_queue;
* ---------------------------------------------------------------------------
*/
NvU64 NV_API_CALL os_get_num_phys_pages (void);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_system_time (NvU32 *, NvU32 *);
NvU64 NV_API_CALL os_get_monotonic_time_ns (void);
NvU64 NV_API_CALL os_get_monotonic_time_ns_hr (void);
NvU64 NV_API_CALL os_get_monotonic_tick_resolution_ns (void);
NV_STATUS NV_API_CALL os_delay (NvU32);
NV_STATUS NV_API_CALL os_delay_us (NvU32);
NvU64 NV_API_CALL os_get_cpu_frequency (void);
NvU32 NV_API_CALL os_get_current_process (void);
void NV_API_CALL os_get_current_process_name (char *, NvU32);
NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *);
char* NV_API_CALL os_string_copy (char *, const char *);
NvU32 NV_API_CALL os_string_length (const char *);
NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32);
NvS32 NV_API_CALL os_string_compare (const char *, const char *);
NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...);
NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list);
void NV_API_CALL os_log_error (const char *, va_list);
void* NV_API_CALL os_mem_copy (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32);
void* NV_API_CALL os_mem_set (void *, NvU8, NvU32);
NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32);
void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *);
NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8);
NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16);
NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32);
NvBool NV_API_CALL os_pci_remove_supported (void);
void NV_API_CALL os_pci_remove (void *);
void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
#if defined(NV_VMWARE)
void* NV_API_CALL os_map_user_space (MemoryArea *, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
#endif
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
NvU8 NV_API_CALL os_io_read_byte (NvU32);
NvU16 NV_API_CALL os_io_read_word (NvU32);
NvU32 NV_API_CALL os_io_read_dword (NvU32);
void NV_API_CALL os_io_write_byte (NvU32, NvU8);
void NV_API_CALL os_io_write_word (NvU32, NvU16);
void NV_API_CALL os_io_write_dword (NvU32, NvU32);
NvBool NV_API_CALL os_is_administrator (void);
NvBool NV_API_CALL os_check_access (RsAccessRight accessRight);
void NV_API_CALL os_dbg_init (void);
void NV_API_CALL os_dbg_breakpoint (void);
void NV_API_CALL os_dbg_set_level (NvU32);
NvU32 NV_API_CALL os_get_cpu_count (void);
NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NvU64 NV_API_CALL os_get_max_user_va (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
NvU64 NV_API_CALL os_acquire_spinlock (void *);
void NV_API_CALL os_release_spinlock (void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *, NvBool);
NvBool NV_API_CALL os_is_queue_flush_ongoing (struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex (void **);
void NV_API_CALL os_free_mutex (void *);
NV_STATUS NV_API_CALL os_acquire_mutex (void *);
NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *);
void NV_API_CALL os_release_mutex (void *);
void* NV_API_CALL os_alloc_semaphore (NvU32);
void NV_API_CALL os_free_semaphore (void *);
NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_release_semaphore (void *);
void* NV_API_CALL os_alloc_rwlock (void);
void NV_API_CALL os_free_rwlock (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_write (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write (void *);
void NV_API_CALL os_release_rwlock_read (void *);
void NV_API_CALL os_release_rwlock_write (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NV_STATUS NV_API_CALL os_get_is_openrm (NvBool *);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
NvBool NV_API_CALL os_is_efi_enabled (void);
NvBool NV_API_CALL os_is_xen_dom0 (void);
NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *, NvU32);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_device_vm_present (void);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
void NV_API_CALL os_wait_interruptible (os_wait_queue *);
void NV_API_CALL os_wake_up (os_wait_queue *);
nv_cap_t* NV_API_CALL os_nv_cap_init (const char *);
nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int);
nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int);
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd (const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
NvS32 NV_API_CALL os_imex_channel_get (NvU64);
NvS32 NV_API_CALL os_imex_channel_count (void);
NV_STATUS NV_API_CALL os_tegra_igpu_perf_boost (void *, NvBool, NvU32);
NV_STATUS NV_API_CALL os_get_tegra_platform (NvU32 *);
enum os_pci_req_atomics_type {
OS_INTF_PCIE_REQ_ATOMICS_32BIT,
OS_INTF_PCIE_REQ_ATOMICS_64BIT,
OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);
void NV_API_CALL os_pci_trigger_flr(void *handle);
NV_STATUS NV_API_CALL os_get_numa_node_memory_usage (NvS32, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *);
NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32);
NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address);
void* NV_API_CALL os_get_pid_info(void);
void NV_API_CALL os_put_pid_info(void *pid_info);
NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid);
NvBool NV_API_CALL os_is_init_ns(void);
NV_STATUS NV_API_CALL os_iommu_sva_bind(void *arg, void **handle, NvU32 *pasid);
void NV_API_CALL os_iommu_sva_unbind(void *handle);
extern NvU32 os_page_size;
extern NvU64 os_page_size;
extern NvU64 os_max_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvBool os_cc_enabled;
extern NvBool os_cc_sev_snp_enabled;
extern NvBool os_cc_sme_enabled;
extern NvBool os_cc_snp_vtom_enabled;
extern NvBool os_cc_tdx_enabled;
extern NvBool os_dma_buf_enabled;
extern NvBool os_imex_channel_is_supported;
/*
* ---------------------------------------------------------------------------

View File

@@ -0,0 +1,104 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_MEMORY_AREA_H
#define NV_MEMORY_AREA_H
typedef struct MemoryRange
{
NvU64 start;
NvU64 size;
} MemoryRange;
typedef struct MemoryArea
{
MemoryRange *pRanges;
NvU64 numRanges;
} MemoryArea;
static inline NvU64 memareaSize(MemoryArea memArea)
{
NvU64 size = 0;
NvU64 idx = 0;
for (idx = 0; idx < memArea.numRanges; idx++)
{
size += memArea.pRanges[idx].size;
}
return size;
}
static inline MemoryRange
mrangeMake
(
NvU64 start,
NvU64 size
)
{
MemoryRange range;
range.start = start;
range.size = size;
return range;
}
static inline NvU64
mrangeLimit
(
MemoryRange a
)
{
return a.start + a.size;
}
static inline NvBool
mrangeIntersects
(
MemoryRange a,
MemoryRange b
)
{
return ((a.start >= b.start) && (a.start < mrangeLimit(b))) ||
((b.start >= a.start) && (b.start < mrangeLimit(a)));
}
static inline NvBool
mrangeContains
(
MemoryRange outer,
MemoryRange inner
)
{
return (inner.start >= outer.start) && (mrangeLimit(inner) <= mrangeLimit(outer));
}
static inline MemoryRange
mrangeOffset
(
MemoryRange range,
NvU64 amt
)
{
range.start += amt;
return range;
}
#endif /* NV_MEMORY_AREA_H */
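A minimal usage sketch of the MemoryArea helpers above (the function memarea_example is hypothetical, not part of the header; it assumes only the types and inline helpers defined above):

/* Describe two discontiguous ranges and query them with the helpers. */
static void memarea_example(void)
{
    MemoryRange ranges[2];
    MemoryArea area;
    NvU64 total;
    NvBool contained;
    NvBool overlap;

    ranges[0] = mrangeMake(0x0, 0x1000);     /* [0x0, 0x1000) */
    ranges[1] = mrangeMake(0x10000, 0x2000); /* [0x10000, 0x12000) */

    area.pRanges = ranges;
    area.numRanges = 2;

    /* Total bytes across both ranges: 0x1000 + 0x2000 = 0x3000. */
    total = memareaSize(area);

    /* The first range contains its own upper half... */
    contained = mrangeContains(ranges[0], mrangeMake(0x800, 0x800));

    /* ...and the two ranges do not overlap. */
    overlap = mrangeIntersects(ranges[0], ranges[1]);

    (void)total; (void)contained; (void)overlap;
}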

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2020 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,6 +27,8 @@ typedef enum
{
NV_OS_GPIO_FUNC_HOTPLUG_A,
NV_OS_GPIO_FUNC_HOTPLUG_B,
NV_OS_GPIO_FUNC_HOTPLUG_C,
NV_OS_GPIO_FUNC_HOTPLUG_D,
} NV_OS_GPIO_FUNC_NAMES;
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -37,7 +37,7 @@ NV_STATUS NV_API_CALL rm_gpu_ops_create_session (nvidia_stack_t *, nvgpuSessio
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (nvidia_stack_t *, nvgpuSessionHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_device_create (nvidia_stack_t *, nvgpuSessionHandle_t, const nvgpuInfo_t *, const NvProcessorUuid *, nvgpuDeviceHandle_t *, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, NvBool, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *, nvgpuAddressSpaceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t);
@@ -45,7 +45,6 @@ NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddres
NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(nvidia_stack_t *, void *, NvLength, NvU32 , nvgpuPmaAllocationOptions_t, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(nvidia_stack_t *, nvgpuDeviceHandle_t, void **, const nvgpuPmaStatistics_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(nvidia_stack_t *sp, void *, nvPmaEvictPagesCallback, nvPmaEvictRangeCallback, void *);
void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(nvidia_stack_t *sp, void *);
@@ -76,18 +75,21 @@ NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *, nvgpuDevi
NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, void *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_toggle_prefetch_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool *);
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_own_access_cntr_intr(nvidia_stack_t *, nvgpuSessionHandle_t, nvgpuAccessCntrInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, nvgpuAccessCntrConfig_t);
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, const nvgpuAccessCntrConfig_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, unsigned, NvBool, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, unsigned, NvBool, NvU32, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_nvlink_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuNvlinkInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_p2p_object_create(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, NvHandle *);
void NV_API_CALL rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *, nvgpuSessionHandle_t, NvHandle);
NV_STATUS NV_API_CALL rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t*, nvgpuAddressSpaceHandle_t, NvHandle, NvU64, NvU64, nvgpuExternalMappingInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_external_alloc_phys_addrs(nvidia_stack_t*, nvgpuAddressSpaceHandle_t, NvHandle, NvU64, NvU64, nvgpuExternalPhysAddrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_retain_channel(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvHandle, NvHandle, void **, nvgpuChannelInstanceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_bind_channel_resources(nvidia_stack_t *, void *, nvgpuChannelResourceBindParams_t);
void NV_API_CALL rm_gpu_ops_release_channel(nvidia_stack_t *, void *);
@@ -100,15 +102,18 @@ void NV_API_CALL rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *, nvgpu
NV_STATUS NV_API_CALL rm_gpu_ops_paging_channels_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t, NvU64 *);
void NV_API_CALL rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, nvgpuPagingChannelHandle_t, char *, NvU32);
void NV_API_CALL rm_gpu_ops_report_fatal_error(nvidia_stack_t *, NV_STATUS error);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_key(nvidia_stack_t *, UvmCslContext *[], NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8*, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU8 *, NvU8 const *, NvU32, NvU8 const *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU32, NvU8 *, NvU8 const *, NvU32, NvU8 const *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_log_encryption(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU32);
#endif

View File

@@ -0,0 +1,276 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <nvtypes.h>
#if defined(_MSC_VER)
#pragma warning(disable:4324)
#endif
//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: rs_access.finn
//
#include "nvtypes.h"
#include "nvmisc.h"
/****************************************************************************/
/* Access right definitions */
/****************************************************************************/
//
// The meaning of each access right is documented in
// resman/docs/rmapi/resource_server/rm_capabilities.adoc
//
// RS_ACCESS_COUNT is the number of access rights that have been defined
// and are in use. All integers in the range [0, RS_ACCESS_COUNT) should
// represent valid access rights.
//
// When adding a new access right, don't forget to update
// 1) The descriptions in the resman/docs/rmapi/resource_server/rm_capabilities.adoc
// 2) RS_ACCESS_COUNT, defined below
// 3) The declaration of g_rsAccessMetadata in rs_access_rights.c
// 4) The list of access rights in drivers/common/chip-config/Chipcontrols.pm
// 5) Any relevant access right callbacks
//
#define RS_ACCESS_DUP_OBJECT 0U
#define RS_ACCESS_NICE 1U
#define RS_ACCESS_DEBUG 2U
#define RS_ACCESS_PERFMON 3U
#define RS_ACCESS_COUNT 4U
/****************************************************************************/
/* Access right data structures */
/****************************************************************************/
/*!
* @brief A type that can be used to represent any access right.
*/
typedef NvU16 RsAccessRight;
/*!
* @brief An internal type used to represent one limb in an access right mask.
*/
typedef NvU32 RsAccessLimb;
#define SDK_RS_ACCESS_LIMB_BITS 32
/*!
* @brief The number of limbs in the RS_ACCESS_MASK struct.
*/
#define SDK_RS_ACCESS_MAX_LIMBS 1
/*!
* @brief The maximum number of possible access rights supported by the
* current data structure definition.
*
* You probably want RS_ACCESS_COUNT instead, which is the number of actual
* access rights defined.
*/
#define SDK_RS_ACCESS_MAX_COUNT (0x20) /* finn: Evaluated from "(SDK_RS_ACCESS_LIMB_BITS * SDK_RS_ACCESS_MAX_LIMBS)" */
/**
* @brief A struct representing a set of access rights.
*
* Note that the values of bit positions larger than RS_ACCESS_COUNT are
* undefined, and should not be assumed to be 0 (see RS_ACCESS_MASK_FILL).
*/
typedef struct RS_ACCESS_MASK {
RsAccessLimb limbs[SDK_RS_ACCESS_MAX_LIMBS];
} RS_ACCESS_MASK;
/**
* @brief A struct representing auxiliary information about each access right.
*/
typedef struct RS_ACCESS_INFO {
NvU32 flags;
} RS_ACCESS_INFO;
/****************************************************************************/
/* Access right macros */
/****************************************************************************/
#define SDK_RS_ACCESS_LIMB_INDEX(index) ((index) / SDK_RS_ACCESS_LIMB_BITS)
#define SDK_RS_ACCESS_LIMB_POS(index) ((index) % SDK_RS_ACCESS_LIMB_BITS)
#define SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) \
((pAccessMask)->limbs[SDK_RS_ACCESS_LIMB_INDEX(index)])
#define SDK_RS_ACCESS_OFFSET_MASK(index) \
NVBIT_TYPE(SDK_RS_ACCESS_LIMB_POS(index), RsAccessLimb)
/*!
* @brief Checks that accessRight represents a valid access right.
*
* The valid range of access rights is [0, RS_ACCESS_COUNT).
*
* @param[in] accessRight The access right value to check
*
* @return true if accessRight is valid
* @return false otherwise
*/
#define RS_ACCESS_BOUNDS_CHECK(accessRight) \
(accessRight < RS_ACCESS_COUNT)
/*!
* @brief Test whether an access right is present in a set
*
* @param[in] pAccessMask The set of access rights to read
* @param[in] index The access right to examine
*
* @return NV_TRUE if the access right specified by index was present in the set,
* and NV_FALSE otherwise
*/
#define RS_ACCESS_MASK_TEST(pAccessMask, index) \
(RS_ACCESS_BOUNDS_CHECK(index) && \
(SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) & SDK_RS_ACCESS_OFFSET_MASK(index)) != 0)
/*!
* @brief Add an access right to a mask
*
* @param[in] pAccessMask The set of access rights to modify
* @param[in] index The access right to set
*/
#define RS_ACCESS_MASK_ADD(pAccessMask, index) \
do \
{ \
if (RS_ACCESS_BOUNDS_CHECK(index)) { \
SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) |= SDK_RS_ACCESS_OFFSET_MASK(index); \
} \
} while (NV_FALSE)
/*!
* @brief Remove an access right from a mask
*
* @param[in] pAccessMask The set of access rights to modify
* @param[in] index The access right to unset
*/
#define RS_ACCESS_MASK_REMOVE(pAccessMask, index) \
do \
{ \
if (RS_ACCESS_BOUNDS_CHECK(index)) { \
SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) &= ~SDK_RS_ACCESS_OFFSET_MASK(index); \
} \
} while (NV_FALSE)
/*!
* @brief Performs an in-place union between two access right masks
*
* @param[in,out] pMaskOut The access rights mask to be updated
* @param[in] pMaskIn The set of access rights to be added to pMaskOut
*/
#define RS_ACCESS_MASK_UNION(pMaskOut, pMaskIn) \
do \
{ \
NvLength limb; \
for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \
{ \
SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) |= SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \
} \
} while (NV_FALSE)
/*!
* @brief Performs an in-place subtract of one mask's rights from another
*
* @param[in,out] pMaskOut The access rights mask to be updated
* @param[in] pMaskIn The set of access rights to be removed from pMaskOut
*/
#define RS_ACCESS_MASK_SUBTRACT(pMaskOut, pMaskIn) \
do \
{ \
NvLength limb; \
for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \
{ \
SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) &= ~SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \
} \
} while (NV_FALSE)
/*!
* @brief Removes all rights from an access rights mask
*
* @param[in,out] pAccessMask The access rights mask to be updated
*/
#define RS_ACCESS_MASK_CLEAR(pAccessMask) \
do \
{ \
portMemSet(pAccessMask, 0, sizeof(*pAccessMask)); \
} while (NV_FALSE)
/*!
* @brief Adds all rights to an access rights mask
*
* @param[in,out] pAccessMask The access rights mask to be updated
*/
#define RS_ACCESS_MASK_FILL(pAccessMask) \
do \
{ \
portMemSet(pAccessMask, 0xff, sizeof(*pAccessMask)); \
} while (NV_FALSE)
/****************************************************************************/
/* Share definitions */
/****************************************************************************/
//
// The usage of Share Policy and the meaning of each share type is documented in
// resman/docs/rmapi/resource_server/rm_capabilities.adoc
//
#define RS_SHARE_TYPE_NONE (0U)
#define RS_SHARE_TYPE_ALL (1U)
#define RS_SHARE_TYPE_OS_SECURITY_TOKEN (2U)
#define RS_SHARE_TYPE_CLIENT (3U)
#define RS_SHARE_TYPE_PID (4U)
#define RS_SHARE_TYPE_SMC_PARTITION (5U)
#define RS_SHARE_TYPE_GPU (6U)
#define RS_SHARE_TYPE_FM_CLIENT (7U)
// Must be last. Update when a new SHARE_TYPE is added
#define RS_SHARE_TYPE_MAX (8U)
//
// Use Revoke to remove an existing policy from the list.
// Allow is based on OR logic, Require is based on AND logic.
// To share a right, at least one Allow (non-Require) must match, and all Require must pass.
// If Compose is specified, policies will be added to the list. Otherwise, they will replace the list.
//
#define RS_SHARE_ACTION_FLAG_REVOKE NVBIT(0)
#define RS_SHARE_ACTION_FLAG_REQUIRE NVBIT(1)
#define RS_SHARE_ACTION_FLAG_COMPOSE NVBIT(2)
/****************************************************************************/
/* Share flag data structures */
/****************************************************************************/
typedef struct RS_SHARE_POLICY {
NvU32 target;
RS_ACCESS_MASK accessMask;
NvU16 type; ///< RS_SHARE_TYPE_
NvU8 action; ///< RS_SHARE_ACTION_
} RS_SHARE_POLICY;
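A hedged sketch of how the mask macros and share policy fit together (the function and its values are illustrative only; RS_ACCESS_MASK_CLEAR relies on portMemSet from RM's port layer):

/* Illustrative only: share DUP_OBJECT and DEBUG rights with all clients,
 * composing onto (rather than replacing) the existing policy list. */
static void share_policy_example(RS_SHARE_POLICY *pPolicy)
{
    RS_ACCESS_MASK_CLEAR(&pPolicy->accessMask);
    RS_ACCESS_MASK_ADD(&pPolicy->accessMask, RS_ACCESS_DUP_OBJECT);
    RS_ACCESS_MASK_ADD(&pPolicy->accessMask, RS_ACCESS_DEBUG);

    pPolicy->type   = RS_SHARE_TYPE_ALL;            /* match any client */
    pPolicy->action = RS_SHARE_ACTION_FLAG_COMPOSE; /* append to the list */
    pPolicy->target = 0;                            /* assumed unused for TYPE_ALL */

    /* RS_ACCESS_MASK_TEST(&pPolicy->accessMask, RS_ACCESS_DEBUG) now
     * evaluates to a nonzero value. */
}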

View File

File diff suppressed because it is too large

View File

@@ -0,0 +1,45 @@
# Each of these headers is checked for presence with a test #include; a
# corresponding #define will be generated in conftest/headers.h.
NV_HEADER_PRESENCE_TESTS = \
asm/system.h \
drm/drm_hdcp.h \
drm/display/drm_hdcp.h \
drm/display/drm_hdcp_helper.h \
drm/drmP.h \
drm/drm_aperture.h \
drm/drm_atomic_state_helper.h \
drm/drm_atomic_uapi.h \
drm/drm_fbdev_generic.h \
drm/drm_fbdev_ttm.h \
drm/drm_client_setup.h \
drm/drm_probe_helper.h \
drm/clients/drm_client_setup.h \
dt-bindings/interconnect/tegra_icc_id.h \
generated/autoconf.h \
generated/compile.h \
generated/utsrelease.h \
linux/aperture.h \
linux/dma-direct.h \
linux/platform/tegra/mc_utils.h \
xen/ioemu.h \
linux/fence.h \
linux/dma-resv.h \
soc/tegra/tegra_bpmp.h \
linux/platform/tegra/dce/dce-client-ipc.h \
linux/nvhost.h \
linux/nvhost_t194.h \
linux/host1x-next.h \
asm/set_memory.h \
asm/pgtable_types.h \
linux/dma-map-ops.h \
sound/hda_codec.h \
linux/interconnect.h \
linux/ioasid.h \
linux/stdarg.h \
linux/iosys-map.h \
linux/vfio_pci_core.h \
linux/cc_platform.h \
linux/slub_def.h \
asm/mshyperv.h \
crypto/sig.h
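Each listed path maps to a NV_<PATH>_PRESENT define (path uppercased; '/', '.', and '-' become '_'), which driver sources then test. A sketch of the generated output and a representative consumer, following the convention used later in this changeset:

/* Generated in conftest/headers.h when the header compiles: */
#define NV_LINUX_FENCE_H_PRESENT
#define NV_DRM_DRM_HDCP_H_PRESENT

/* Typical consumer, as in the fence helpers later in this diff: */
#if defined(NV_LINUX_FENCE_H_PRESENT)
#include <linux/fence.h>
#else
#include <linux/dma-fence.h>
#endif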

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,12 +29,7 @@
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif
#include <linux/bug.h>
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
@@ -176,7 +171,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
{
unsigned i, j;
const static unsigned attempts = 3;
static const unsigned attempts = 3;
struct task_struct *thread[3];
for (i = 0;; i++) {
@@ -201,7 +196,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
// Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node
if ((i == (attempts - 1)))
if (i == (attempts - 1))
break;
// Get the NUMA node where the first page of the stack is resident. If
@@ -247,6 +242,11 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
return 0;
}
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)

View File

@@ -25,6 +25,15 @@
#include <linux/module.h>
#include "nv-pci-table.h"
#include "cpuopsys.h"
#if defined(NV_BSD)
/* Define PCI classes that FreeBSD's linuxkpi is missing */
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_CLASS_DISPLAY_VGA 0x0300
#define PCI_CLASS_DISPLAY_3D 0x0302
#define PCI_CLASS_BRIDGE_OTHER 0x0680
#endif
/* Devices supported by RM */
struct pci_device_id nv_pci_table[] = {
@@ -48,7 +57,7 @@ struct pci_device_id nv_pci_table[] = {
};
/* Devices supported by all drivers in nvidia.ko */
struct pci_device_id nv_module_device_table[] = {
struct pci_device_id nv_module_device_table[4] = {
{
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_ANY_ID,
@@ -76,4 +85,6 @@ struct pci_device_id nv_module_device_table[] = {
{ }
};
#if defined(NV_LINUX)
MODULE_DEVICE_TABLE(pci, nv_module_device_table);
#endif

View File

@@ -27,5 +27,6 @@
#include <linux/pci.h>
extern struct pci_device_id nv_pci_table[];
extern struct pci_device_id nv_module_device_table[4];
#endif /* _NV_PCI_TABLE_H_ */

View File

@@ -0,0 +1,120 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_COMMON_UTILS_H__
#define __NV_COMMON_UTILS_H__
#include "nvtypes.h"
#include "nvmisc.h"
#if !defined(TRUE)
#define TRUE NV_TRUE
#endif
#if !defined(FALSE)
#define FALSE NV_FALSE
#endif
#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0)
/* Get the length of a statically-sized array. */
#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0]))
#define NV_INVALID_HEAD 0xFFFFFFFF
#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0)
#if !defined(NV_MIN)
# define NV_MIN(a,b) (((a)<(b))?(a):(b))
#endif
#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c)
#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d)
#if !defined(NV_MAX)
# define NV_MAX(a,b) (((a)>(b))?(a):(b))
#endif
#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c)
#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d)
static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max)
{
if (val < min) {
return min;
}
if (val > max) {
return max;
}
return val;
}
#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0))
/*
* Macros used for computing palette entries:
*
* NV_UNDER_REPLICATE(val, source_size, result_size) expands a value
* of source_size bits into a value of result_size bits by shifting
* the source value into the high bits and replicating the high bits
* of the value into the low bits of the result.
*
* PALETTE_DEPTH_SHIFT(val, w) maps a colormap entry for a component
* that has w bits to an appropriate entry in a LUT of 256 entries.
*/
static inline unsigned int NV_UNDER_REPLICATE(unsigned short val,
int source_size,
int result_size)
{
return (val << (result_size - source_size)) |
(val >> ((source_size << 1) - result_size));
}
static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth)
{
return NV_UNDER_REPLICATE(val, depth, 8);
}
/*
* Use __builtin_ffs where it is supported, or provide an equivalent
* implementation for platforms like riscv where it is not.
*/
#if defined(__GNUC__) && !NVCPU_IS_RISCV64
static inline int nv_ffs(int x)
{
return __builtin_ffs(x);
}
#else
static inline int nv_ffs(int x)
{
if (x == 0)
return 0;
LOWESTBITIDX_32(x);
return 1 + x;
}
#endif
#endif /* __NV_COMMON_UTILS_H__ */
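A hand-checked example of the helpers above (illustrative, not part of the header):

#include <assert.h>

/* Worked values for the bit-replication, ffs, and rounding helpers. */
static void common_utils_example(void)
{
    /* 5-bit max expands to 8-bit max:
     * (0x1F << 3) | (0x1F >> 2) = 0xF8 | 0x07 = 0xFF */
    assert(NV_UNDER_REPLICATE(0x1F, 5, 8) == 0xFF);

    /* Mid-range value: (0x10 << 3) | (0x10 >> 2) = 0x80 | 0x04 = 0x84 */
    assert(PALETTE_DEPTH_SHIFT(0x10, 5) == 0x84);

    /* nv_ffs is 1-based, with 0 meaning "no bits set". */
    assert(nv_ffs(0) == 0);
    assert(nv_ffs(8) == 4);

    /* NV_ROUNDUP_DIV rounds the quotient up: 10 / 4 -> 3. */
    assert(NV_ROUNDUP_DIV(10, 4) == 3);
}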

View File

@@ -1,154 +0,0 @@
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NVIDIA_DMA_FENCE_HELPER_H__
#define __NVIDIA_DMA_FENCE_HELPER_H__
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
/*
* Fence headers are moved to file dma-fence.h and struct fence has
* been renamed to dma_fence by commit -
*
* 2016-10-25 : f54d1867005c3323f5d8ad83eed823e84226c429
*/
#if defined(NV_LINUX_FENCE_H_PRESENT)
#include <linux/fence.h>
#else
#include <linux/dma-fence.h>
#endif
#if defined(NV_LINUX_FENCE_H_PRESENT)
typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
typedef struct fence_cb nv_dma_fence_cb_t;
typedef fence_func_t nv_dma_fence_func_t;
#else
typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
typedef struct dma_fence_cb nv_dma_fence_cb_t;
typedef dma_fence_func_t nv_dma_fence_func_t;
#endif
#if defined(NV_LINUX_FENCE_H_PRESENT)
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
#else
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT
#endif
static inline bool nv_dma_fence_is_signaled(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_is_signaled(fence);
#else
return dma_fence_is_signaled(fence);
#endif
}
static inline nv_dma_fence_t *nv_dma_fence_get(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_get(fence);
#else
return dma_fence_get(fence);
#endif
}
static inline void nv_dma_fence_put(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
fence_put(fence);
#else
dma_fence_put(fence);
#endif
}
static inline signed long
nv_dma_fence_default_wait(nv_dma_fence_t *fence,
bool intr, signed long timeout) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_default_wait(fence, intr, timeout);
#else
return dma_fence_default_wait(fence, intr, timeout);
#endif
}
static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_signal(fence);
#else
return dma_fence_signal(fence);
#endif
}
static inline int nv_dma_fence_signal_locked(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_signal_locked(fence);
#else
return dma_fence_signal_locked(fence);
#endif
}
static inline u64 nv_dma_fence_context_alloc(unsigned num) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_context_alloc(num);
#else
return dma_fence_context_alloc(num);
#endif
}
static inline void
nv_dma_fence_init(nv_dma_fence_t *fence,
const nv_dma_fence_ops_t *ops,
spinlock_t *lock, u64 context, uint64_t seqno) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
fence_init(fence, ops, lock, context, seqno);
#else
dma_fence_init(fence, ops, lock, context, seqno);
#endif
}
static inline void
nv_dma_fence_set_error(nv_dma_fence_t *fence,
int error) {
#if defined(NV_DMA_FENCE_SET_ERROR_PRESENT)
return dma_fence_set_error(fence, error);
#else
fence->status = error;
#endif
}
static inline int
nv_dma_fence_add_callback(nv_dma_fence_t *fence,
nv_dma_fence_cb_t *cb,
nv_dma_fence_func_t func) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
return fence_add_callback(fence, cb, func);
#else
return dma_fence_add_callback(fence, cb, func);
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
#endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */

View File

@@ -25,8 +25,6 @@
#include "nvidia-drm-conftest.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
/*
* linux/reservation.h is renamed to linux/dma-resv.h, by commit
* 52791eeec1d9 (dma-buf: rename reservation_object to dma_resv)
@@ -39,7 +37,7 @@
#include <linux/reservation.h>
#endif
#include <nvidia-dma-fence-helper.h>
#include <linux/dma-fence.h>
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
typedef struct dma_resv nv_dma_resv_t;
@@ -108,7 +106,7 @@ static inline int nv_dma_resv_reserve_fences(nv_dma_resv_t *obj,
}
static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
nv_dma_fence_t *fence)
struct dma_fence *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
@@ -121,6 +119,18 @@ static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
#endif
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
static inline void nv_dma_resv_add_shared_fence(nv_dma_resv_t *obj,
struct dma_fence *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_READ);
#else
dma_resv_add_shared_fence(obj, fence);
#endif
#else
reservation_object_add_shared_fence(obj, fence);
#endif
}
#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */

View File

@@ -24,6 +24,7 @@
#define __NVIDIA_DRM_CONFTEST_H__
#include "conftest.h"
#include "nvtypes.h"
/*
* NOTE: This file is expected to get included at the top before including any
@@ -54,11 +55,150 @@
#endif
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) || \
defined(NV_DRM_GEM_OBJECT_HAS_RESV)
#define NV_DRM_FENCE_AVAILABLE
#else
#undef NV_DRM_FENCE_AVAILABLE
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && \
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
#endif
#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && \
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_FBDEV_TTM_AVAILABLE
#endif
#endif
#if defined(NV_DRM_CLIENT_SETUP_PRESENT) && \
(defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT) || \
defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT))
// XXX remove dependency on DRM_TTM_HELPER by implementing nvidia-drm's own
// .fbdev_probe callback that uses NVKMS kapi
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
#define NV_DRM_FBDEV_AVAILABLE
#define NV_DRM_CLIENT_AVAILABLE
#endif
#endif
/*
* Adapt to quirks in FreeBSD's Linux kernel compatibility layer.
*/
#if defined(NV_BSD)
#include <linux/rwsem.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>
/* For nv_drm_gem_prime_force_fence_signal */
#ifndef spin_is_locked
#if ((__FreeBSD_version >= 1500000) && (__FreeBSD_version < 1500018)) || (__FreeBSD_version < 1401501)
#define spin_is_locked(lock) mtx_owned(lock.m)
#else
#define spin_is_locked(lock) mtx_owned(lock)
#endif
#endif
#ifndef rwsem_is_locked
#define rwsem_is_locked(sem) (((sem)->sx.sx_lock & (SX_LOCK_SHARED)) \
|| ((sem)->sx.sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)))
#endif
/*
* FreeBSD does not define vm_flags_t in its linuxkpi, since there is already
* a FreeBSD vm_flags_t (of a different size) and they don't want the names to
* collide. Temporarily redefine it when including nv-mm.h
*/
#define vm_flags_t unsigned long
#include "nv-mm.h"
#undef vm_flags_t
/*
* sys/nv.h and nvidia/nv.h have the same header guard;
* we need to clear it for nvlist_t to get loaded
*/
#undef _NV_H_
#include <sys/nv.h>
/*
* For now just use set_page_dirty as the lock variant
* is not ported for FreeBSD. (in progress). This calls
* vm_page_dirty. Used in nv-mm.h
*/
#define set_page_dirty_lock set_page_dirty
/*
* FreeBSD does not implement drm_atomic_state_free, simply
* default to drm_atomic_state_put
*/
#define drm_atomic_state_free drm_atomic_state_put
#if __FreeBSD_version < 1300000
/* redefine LIST_HEAD_INIT to the linux version */
#include <linux/list.h>
#define LIST_HEAD_INIT(name) LINUX_LIST_HEAD_INIT(name)
#endif
/*
* FreeBSD currently has only vmf_insert_pfn_prot defined, and it has a
* static assert warning not to use it since all of DRM's usages are in
* loops with the vm obj lock(s) held. Instead we should use the lkpi
* function itself directly. For us none of this applies so we can just
* wrap it in our own definition of vmf_insert_pfn
*/
#ifndef NV_VMF_INSERT_PFN_PRESENT
#define NV_VMF_INSERT_PFN_PRESENT 1
#if __FreeBSD_version < 1300000
#define VM_SHARED (1 << 17)
/* Not present in 12.2 */
static inline vm_fault_t
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t prot)
{
vm_object_t vm_obj = vma->vm_obj;
vm_page_t page;
vm_pindex_t pindex;
VM_OBJECT_ASSERT_WLOCKED(vm_obj);
pindex = OFF_TO_IDX(addr - vma->vm_start);
if (vma->vm_pfn_count == 0)
vma->vm_pfn_first = pindex;
MPASS(pindex <= OFF_TO_IDX(vma->vm_end));
page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NORMAL);
if (page == NULL) {
page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
vm_page_xbusy(page);
if (vm_page_insert(page, vm_obj, pindex)) {
vm_page_xunbusy(page);
return (VM_FAULT_OOM);
}
page->valid = VM_PAGE_BITS_ALL;
}
pmap_page_set_memattr(page, pgprot2cachemode(prot));
vma->vm_pfn_count++;
return (VM_FAULT_NOPAGE);
}
#endif
static inline vm_fault_t
vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
vm_fault_t ret;
VM_OBJECT_WLOCK(vma->vm_obj);
ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, vma->vm_page_prot);
VM_OBJECT_WUNLOCK(vma->vm_obj);
return (ret);
}
#endif
#endif /* defined(NV_BSD) */
#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */

View File

@@ -228,9 +228,6 @@ nv_drm_connector_detect(struct drm_connector *connector, bool force)
}
static struct drm_connector_funcs nv_connector_funcs = {
#if defined NV_DRM_ATOMIC_HELPER_CONNECTOR_DPMS_PRESENT
.dpms = drm_atomic_helper_connector_dpms,
#endif
.destroy = nv_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.force = __nv_drm_connector_force,
@@ -314,7 +311,11 @@ static int nv_drm_connector_get_modes(struct drm_connector *connector)
}
static int nv_drm_connector_mode_valid(struct drm_connector *connector,
#if defined(NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG)
const struct drm_display_mode *mode)
#else
struct drm_display_mode *mode)
#endif
{
struct drm_device *dev = connector->dev;
struct nv_drm_device *nv_dev = to_nv_device(dev);
@@ -349,10 +350,125 @@ nv_drm_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
static const NvU32 __nv_drm_connector_supported_colorspaces =
BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
#endif
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
static int
__nv_drm_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct drm_connector_state *new_connector_state =
drm_atomic_get_new_connector_state(state, connector);
struct drm_connector_state *old_connector_state =
drm_atomic_get_old_connector_state(state, connector);
struct nv_drm_device *nv_dev = to_nv_device(connector->dev);
struct drm_crtc *crtc = new_connector_state->crtc;
struct drm_crtc_state *crtc_state;
struct nv_drm_crtc_state *nv_crtc_state;
struct NvKmsKapiHeadRequestedConfig *req_config;
if (!crtc) {
return 0;
}
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
nv_crtc_state = to_nv_crtc_state(crtc_state);
req_config = &nv_crtc_state->req_config;
/*
* Override metadata for the entire head instead of allowing NVKMS to derive
* it from the layers' metadata.
*
* This is the metadata that will be sent to the display, and if applicable,
* layers will be tone mapped to this metadata rather than that of the
* display.
*/
req_config->flags.hdrInfoFrameChanged =
!drm_connector_atomic_hdr_metadata_equal(old_connector_state,
new_connector_state);
if (new_connector_state->hdr_output_metadata &&
new_connector_state->hdr_output_metadata->data) {
/*
* Note that HDMI definitions are used here even though we might not
* be using HDMI. While that seems odd, it is consistent with
* upstream behavior.
*/
struct hdr_output_metadata *hdr_metadata =
new_connector_state->hdr_output_metadata->data;
struct hdr_metadata_infoframe *info_frame =
&hdr_metadata->hdmi_metadata_type1;
unsigned int i;
if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) {
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i++) {
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].x =
info_frame->display_primaries[i].x;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].y =
info_frame->display_primaries[i].y;
}
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.x =
info_frame->white_point.x;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.y =
info_frame->white_point.y;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxDisplayMasteringLuminance =
info_frame->max_display_mastering_luminance;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.minDisplayMasteringLuminance =
info_frame->min_display_mastering_luminance;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxCLL =
info_frame->max_cll;
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxFALL =
info_frame->max_fall;
req_config->modeSetConfig.hdrInfoFrame.eotf = info_frame->eotf;
req_config->modeSetConfig.hdrInfoFrame.enabled = NV_TRUE;
} else {
req_config->modeSetConfig.hdrInfoFrame.enabled = NV_FALSE;
}
req_config->flags.colorimetryChanged =
(old_connector_state->colorspace != new_connector_state->colorspace);
// When adding a case here, also add to __nv_drm_connector_supported_colorspaces
switch (new_connector_state->colorspace) {
case DRM_MODE_COLORIMETRY_DEFAULT:
req_config->modeSetConfig.colorimetry =
NVKMS_OUTPUT_COLORIMETRY_DEFAULT;
break;
case DRM_MODE_COLORIMETRY_BT2020_RGB:
case DRM_MODE_COLORIMETRY_BT2020_YCC:
// Ignore RGB/YCC
// See https://patchwork.freedesktop.org/patch/525496/?series=111865&rev=4
req_config->modeSetConfig.colorimetry =
NVKMS_OUTPUT_COLORIMETRY_BT2100;
break;
default:
// XXX HDR TODO: Add support for more color spaces
NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported color space");
return -EINVAL;
}
return 0;
}
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
static const struct drm_connector_helper_funcs nv_connector_helper_funcs = {
.get_modes = nv_drm_connector_get_modes,
.mode_valid = nv_drm_connector_mode_valid,
.best_encoder = nv_drm_connector_best_encoder,
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
.atomic_check = __nv_drm_connector_atomic_check,
#endif
};
static struct drm_connector*
@@ -405,6 +521,32 @@ nv_drm_connector_new(struct drm_device *dev,
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
}
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
if (nv_connector->type == NVKMS_CONNECTOR_TYPE_HDMI) {
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
if (drm_mode_create_hdmi_colorspace_property(
&nv_connector->base,
__nv_drm_connector_supported_colorspaces) == 0) {
#else
if (drm_mode_create_hdmi_colorspace_property(&nv_connector->base) == 0) {
#endif
drm_connector_attach_colorspace_property(&nv_connector->base);
}
drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
} else if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DP) {
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
if (drm_mode_create_dp_colorspace_property(
&nv_connector->base,
__nv_drm_connector_supported_colorspaces) == 0) {
#else
if (drm_mode_create_dp_colorspace_property(&nv_connector->base) == 0) {
#endif
drm_connector_attach_colorspace_property(&nv_connector->base);
}
drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
}
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
/* Register connector with DRM subsystem */
ret = drm_connector_register(&nv_connector->base);
@@ -443,16 +585,11 @@ nv_drm_get_connector(struct drm_device *dev,
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH])
{
struct drm_connector *connector = NULL;
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
struct drm_connector_list_iter conn_iter;
nv_drm_connector_list_iter_begin(dev, &conn_iter);
#else
struct drm_mode_config *config = &dev->mode_config;
mutex_lock(&config->mutex);
#endif
drm_connector_list_iter_begin(dev, &conn_iter);
/* Look up an existing connector with the same physical index */
nv_drm_for_each_connector(connector, &conn_iter, dev) {
drm_for_each_connector_iter(connector, &conn_iter) {
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
if (nv_connector->physicalIndex == physicalIndex) {
@@ -467,11 +604,7 @@ nv_drm_get_connector(struct drm_device *dev,
connector = NULL;
done:
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
nv_drm_connector_list_iter_end(&conn_iter);
#else
mutex_unlock(&config->mutex);
#endif
drm_connector_list_iter_end(&conn_iter);
if (!connector) {
connector = nv_drm_connector_new(dev,

View File

@@ -31,9 +31,7 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
#include <drm/drm_connector.h>
#endif
#include "nvtypes.h"
#include "nvkms-api-types.h"

View File

File diff suppressed because it is too large

View File

@@ -38,6 +38,13 @@
#include "nvtypes.h"
#include "nvkms-kapi.h"
enum nv_drm_transfer_function {
NV_DRM_TRANSFER_FUNCTION_DEFAULT,
NV_DRM_TRANSFER_FUNCTION_LINEAR,
NV_DRM_TRANSFER_FUNCTION_PQ,
NV_DRM_TRANSFER_FUNCTION_MAX,
};
struct nv_drm_crtc {
NvU32 head;
@@ -63,6 +70,8 @@ struct nv_drm_crtc {
*/
struct drm_file *modeset_permission_filep;
struct NvKmsLUTCaps olut_caps;
struct drm_crtc base;
};
@@ -142,6 +151,12 @@ struct nv_drm_crtc_state {
* nv_drm_atomic_crtc_destroy_state().
*/
struct nv_drm_flip *nv_flip;
enum nv_drm_transfer_function regamma_tf;
struct drm_property_blob *regamma_lut;
uint64_t regamma_divisor;
struct nv_drm_lut_surface *regamma_drm_lut_surface;
NvBool regamma_changed;
};
static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state)
@@ -149,6 +164,11 @@ static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *
return container_of(state, struct nv_drm_crtc_state, base);
}
static inline const struct nv_drm_crtc_state *to_nv_crtc_state_const(const struct drm_crtc_state *state)
{
return container_of(state, struct nv_drm_crtc_state, base);
}
struct nv_drm_plane {
/**
* @base:
@@ -170,6 +190,16 @@ struct nv_drm_plane {
* Index of this plane in the per head array of layers.
*/
uint32_t layer_idx;
/**
* @supportsColorProperties
*
* If true, supports the COLOR_ENCODING and COLOR_RANGE properties.
*/
bool supportsColorProperties;
struct NvKmsLUTCaps ilut_caps;
struct NvKmsLUTCaps tmo_caps;
};
static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
@@ -180,13 +210,54 @@ static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
return container_of(plane, struct nv_drm_plane, base);
}
struct nv_drm_nvkms_surface {
struct NvKmsKapiDevice *pDevice;
struct NvKmsKapiMemory *nvkms_memory;
struct NvKmsKapiSurface *nvkms_surface;
void *buffer;
struct kref refcount;
};
struct nv_drm_nvkms_surface_params {
NvU32 width;
NvU32 height;
size_t surface_size;
enum NvKmsSurfaceMemoryFormat format;
};
struct nv_drm_lut_surface {
struct nv_drm_nvkms_surface base;
struct {
NvU32 vssSegments;
enum NvKmsLUTVssType vssType;
NvU32 lutEntries;
enum NvKmsLUTFormat entryFormat;
} properties;
};
struct nv_drm_plane_state {
struct drm_plane_state base;
s32 __user *fd_user_ptr;
enum NvKmsInputColorSpace input_colorspace;
enum nv_drm_input_color_space input_colorspace;
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct drm_property_blob *hdr_output_metadata;
#endif
struct drm_property_blob *lms_ctm;
struct drm_property_blob *lms_to_itp_ctm;
struct drm_property_blob *itp_to_lms_ctm;
struct drm_property_blob *blend_ctm;
enum nv_drm_transfer_function degamma_tf;
struct drm_property_blob *degamma_lut;
uint64_t degamma_multiplier; /* S31.32 Sign-Magnitude Format */
struct nv_drm_lut_surface *degamma_drm_lut_surface;
NvBool degamma_changed;
struct drm_property_blob *tmo_lut;
struct nv_drm_lut_surface *tmo_drm_lut_surface;
NvBool tmo_changed;
};
static inline struct nv_drm_plane_state *to_nv_drm_plane_state(struct drm_plane_state *state)

View File

File diff suppressed because it is too large

View File

@@ -27,10 +27,18 @@
#if defined(NV_DRM_AVAILABLE)
struct NvKmsKapiGpuInfo;
int nv_drm_probe_devices(void);
void nv_drm_remove_devices(void);
void nv_drm_suspend_resume(NvBool suspend);
void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *);
void nv_drm_update_drm_driver_features(void);
#endif /* defined(NV_DRM_AVAILABLE) */
#endif /* __NVIDIA_DRM_DRV_H__ */

View File

@@ -139,12 +139,8 @@ nv_drm_encoder_new(struct drm_device *dev,
ret = drm_encoder_init(dev,
&nv_encoder->base, &nv_encoder_funcs,
nvkms_connector_signal_to_drm_encoder_signal(format)
#if defined(NV_DRM_ENCODER_INIT_HAS_NAME_ARG)
, NULL
#endif
);
nvkms_connector_signal_to_drm_encoder_signal(format),
NULL);
if (ret != 0) {
nv_drm_free(nv_encoder);
@@ -300,7 +296,7 @@ void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
drm_kms_helper_hotplug_event(dev);
schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
}
void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
@@ -319,7 +315,7 @@ void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
if (nv_encoder != NULL) {
NV_DRM_DEV_LOG_ERR(
NV_DRM_DEV_LOG_INFO(
nv_dev,
"Encoder with NvKmsKapiDisplay 0x%08x already exists.",
hDisplay);
@@ -336,17 +332,6 @@ void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
return;
}
/*
* On some kernels, DRM has the notion of a "primary group" that
* tracks the global mode setting state for the device.
*
* On kernels where DRM has a primary group, we need to reinitialize
* after adding encoders and connectors.
*/
#if defined(NV_DRM_REINIT_PRIMARY_MODE_GROUP_PRESENT)
drm_reinit_primary_mode_group(dev);
#endif
drm_kms_helper_hotplug_event(dev);
schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
}
#endif

View File

@@ -29,11 +29,7 @@
#include "nvidia-drm-priv.h"
#if defined(NV_DRM_DRM_ENCODER_H_PRESENT)
#include <drm/drm_encoder.h>
#else
#include <drm/drmP.h>
#endif
#include "nvkms-kapi.h"

View File

@@ -36,12 +36,15 @@
static void __nv_drm_framebuffer_free(struct nv_drm_framebuffer *nv_fb)
{
struct drm_framebuffer *fb = &nv_fb->base;
uint32_t i;
/* Unreference gem object */
for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
if (nv_fb->nv_gem[i] != NULL) {
nv_drm_gem_object_unreference_unlocked(nv_fb->nv_gem[i]);
for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
struct drm_gem_object *gem = fb->obj[i];
if (gem != NULL) {
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
nv_drm_gem_object_unreference_unlocked(nv_gem);
}
}
@@ -69,10 +72,8 @@ static int
nv_drm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file, unsigned int *handle)
{
struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
return nv_drm_gem_handle_create(file,
nv_fb->nv_gem[0],
to_nv_gem_object(fb->obj[0]),
handle);
}
@@ -82,12 +83,12 @@ static struct drm_framebuffer_funcs nv_framebuffer_funcs = {
};
static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
struct drm_device *dev,
struct nv_drm_device *nv_dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd)
const struct drm_mode_fb_cmd2 *cmd)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_framebuffer *nv_fb;
struct nv_drm_gem_object *nv_gem;
const int num_planes = nv_drm_format_num_planes(cmd->pixel_format);
uint32_t i;
@@ -101,21 +102,22 @@ static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
return ERR_PTR(-ENOMEM);
}
if (num_planes > ARRAY_SIZE(nv_fb->nv_gem)) {
if (num_planes > NVKMS_MAX_PLANES_PER_SURFACE) {
NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Unsupported number of planes");
goto failed;
}
for (i = 0; i < num_planes; i++) {
if ((nv_fb->nv_gem[i] = nv_drm_gem_object_lookup(
dev,
file,
cmd->handles[i])) == NULL) {
nv_gem = nv_drm_gem_object_lookup(file, cmd->handles[i]);
if (nv_gem == NULL) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Failed to find gem object of type nvkms memory");
goto failed;
}
nv_fb->base.obj[i] = &nv_gem->base;
}
return nv_fb;
@@ -135,12 +137,14 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct NvKmsKapiCreateSurfaceParams params = { };
struct nv_drm_gem_object *nv_gem;
struct drm_framebuffer *fb = &nv_fb->base;
uint32_t i;
int ret;
/* Initialize the base framebuffer object and add it to drm subsystem */
ret = drm_framebuffer_init(dev, &nv_fb->base, &nv_framebuffer_funcs);
ret = drm_framebuffer_init(dev, fb, &nv_framebuffer_funcs);
if (ret != 0) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
@@ -148,23 +152,18 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
return ret;
}
for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) {
if (nv_fb->nv_gem[i] != NULL) {
if (!nvKms->isMemoryValidForDisplay(nv_dev->pDevice,
nv_fb->nv_gem[i]->pMemory)) {
NV_DRM_DEV_LOG_INFO(
nv_dev,
"Framebuffer memory not appropriate for scanout");
goto fail;
}
for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
struct drm_gem_object *gem = fb->obj[i];
if (gem != NULL) {
nv_gem = to_nv_gem_object(gem);
params.planes[i].memory = nv_fb->nv_gem[i]->pMemory;
params.planes[i].offset = nv_fb->base.offsets[i];
params.planes[i].pitch = nv_fb->base.pitches[i];
params.planes[i].memory = nv_gem->pMemory;
params.planes[i].offset = fb->offsets[i];
params.planes[i].pitch = fb->pitches[i];
}
}
params.height = nv_fb->base.height;
params.width = nv_fb->base.width;
params.height = fb->height;
params.width = fb->width;
params.format = format;
if (have_modifier) {
@@ -188,6 +187,43 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
params.explicit_layout = false;
}
/*
* XXX work around an invalid pitch assumption in DRM.
*
* The smallest pitch the display hardware allows is 256.
*
* If a DRM client allocates a 32x32 cursor surface through
* DRM_IOCTL_MODE_CREATE_DUMB, we'll correctly round the pitch to 256:
*
* pitch = round(32width * 4Bpp, 256) = 256
*
* and then allocate an 8k surface:
*
* size = pitch * 32height = 8192
*
* and report the rounded pitch and size back to the client through the
* struct drm_mode_create_dumb ioctl params.
*
* But when the DRM client passes that buffer object handle to
* DRM_IOCTL_MODE_CURSOR, the client has no way to specify the pitch. This
* path in drm:
*
* DRM_IOCTL_MODE_CURSOR
* drm_mode_cursor_ioctl()
* drm_mode_cursor_common()
* drm_mode_cursor_universal()
*
* will implicitly create a framebuffer from the buffer object, and compute
* the pitch as width x 4Bpp (without aligning to our minimum pitch).
*
* Intercept this case and force the pitch back to 256.
*/
if ((params.width == 32) &&
(params.height == 32) &&
(params.planes[0].pitch == 128)) {
params.planes[0].pitch = 256;
}
/* Create NvKmsKapiSurface */
nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, &params);
@@ -199,23 +235,21 @@ static int nv_drm_framebuffer_init(struct drm_device *dev,
return 0;
fail:
drm_framebuffer_cleanup(&nv_fb->base);
drm_framebuffer_cleanup(fb);
return -EINVAL;
}
struct drm_framebuffer *nv_drm_internal_framebuffer_create(
struct drm_framebuffer *nv_drm_framebuffer_create(
struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd)
const struct drm_mode_fb_cmd2 *cmd)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct nv_drm_framebuffer *nv_fb;
uint64_t modifier = 0;
int ret;
enum NvKmsSurfaceMemoryFormat format;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
int i;
#endif
bool have_modifier = false;
/* Check whether NvKms supports the given pixel format */
@@ -226,7 +260,6 @@ struct drm_framebuffer *nv_drm_internal_framebuffer_create(
return ERR_PTR(-EINVAL);
}
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
if (cmd->flags & DRM_MODE_FB_MODIFIERS) {
have_modifier = true;
modifier = cmd->modifier[0];
@@ -240,14 +273,13 @@ struct drm_framebuffer *nv_drm_internal_framebuffer_create(
if (nv_dev->modifiers[i] == DRM_FORMAT_MOD_INVALID) {
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Invalid format modifier for framebuffer object: 0x%016llx",
"Invalid format modifier for framebuffer object: 0x%016" NvU64_fmtx,
modifier);
return ERR_PTR(-EINVAL);
}
}
#endif
nv_fb = nv_drm_framebuffer_alloc(dev, file, cmd);
nv_fb = nv_drm_framebuffer_alloc(nv_dev, file, cmd);
if (IS_ERR(nv_fb)) {
return (struct drm_framebuffer *)nv_fb;
}
@@ -255,9 +287,7 @@ struct drm_framebuffer *nv_drm_internal_framebuffer_create(
/* Fill out framebuffer metadata from the userspace fb creation request */
drm_helper_mode_fill_fb_struct(
#if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG)
dev,
#endif
&nv_fb->base,
cmd);


@@ -31,19 +31,13 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_FRAMEBUFFER_H_PRESENT)
#include <drm/drm_framebuffer.h>
#endif
#include "nvidia-drm-gem-nvkms-memory.h"
#include "nvkms-kapi.h"
struct nv_drm_framebuffer {
struct NvKmsKapiSurface *pSurface;
struct nv_drm_gem_object*
nv_gem[NVKMS_MAX_PLANES_PER_SURFACE];
struct drm_framebuffer base;
};
@@ -56,10 +50,10 @@ static inline struct nv_drm_framebuffer *to_nv_framebuffer(
return container_of(fb, struct nv_drm_framebuffer, base);
}
struct drm_framebuffer *nv_drm_internal_framebuffer_create(
struct drm_framebuffer *nv_drm_framebuffer_create(
struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *cmd);
const struct drm_mode_fb_cmd2 *cmd);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2025, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -34,9 +34,7 @@
#include "nvidia-drm-fence.h"
#include "nvidia-dma-resv-helper.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#include <linux/dma-fence.h>
#define NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS 5000
@@ -83,42 +81,42 @@ struct nv_drm_prime_fence_context {
struct nv_drm_prime_fence {
struct list_head list_entry;
nv_dma_fence_t base;
struct dma_fence base;
spinlock_t lock;
};
static inline
struct nv_drm_prime_fence *to_nv_drm_prime_fence(nv_dma_fence_t *fence)
struct nv_drm_prime_fence *to_nv_drm_prime_fence(struct dma_fence *fence)
{
return container_of(fence, struct nv_drm_prime_fence, base);
}
static const char*
nv_drm_gem_fence_op_get_driver_name(nv_dma_fence_t *fence)
nv_drm_gem_fence_op_get_driver_name(struct dma_fence *fence)
{
return "NVIDIA";
}
static const char*
nv_drm_gem_prime_fence_op_get_timeline_name(nv_dma_fence_t *fence)
nv_drm_gem_prime_fence_op_get_timeline_name(struct dma_fence *fence)
{
return "nvidia.prime";
}
static bool nv_drm_gem_prime_fence_op_enable_signaling(nv_dma_fence_t *fence)
static bool nv_drm_gem_prime_fence_op_enable_signaling(struct dma_fence *fence)
{
// DO NOTHING
return true;
}
static void nv_drm_gem_prime_fence_op_release(nv_dma_fence_t *fence)
static void nv_drm_gem_prime_fence_op_release(struct dma_fence *fence)
{
struct nv_drm_prime_fence *nv_fence = to_nv_drm_prime_fence(fence);
nv_drm_free(nv_fence);
}
static signed long
nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
nv_drm_gem_prime_fence_op_wait(struct dma_fence *fence,
bool intr, signed long timeout)
{
/*
@@ -131,12 +129,12 @@ nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
* that it should never get hit during normal operation, but not so long
* that the system becomes unresponsive.
*/
return nv_dma_fence_default_wait(fence, intr,
return dma_fence_default_wait(fence, intr,
(timeout == MAX_SCHEDULE_TIMEOUT) ?
msecs_to_jiffies(96) : timeout);
}
static const nv_dma_fence_ops_t nv_drm_gem_prime_fence_ops = {
static const struct dma_fence_ops nv_drm_gem_prime_fence_ops = {
.get_driver_name = nv_drm_gem_fence_op_get_driver_name,
.get_timeline_name = nv_drm_gem_prime_fence_op_get_timeline_name,
.enable_signaling = nv_drm_gem_prime_fence_op_enable_signaling,
@@ -148,8 +146,8 @@ static inline void
__nv_drm_prime_fence_signal(struct nv_drm_prime_fence *nv_fence)
{
list_del(&nv_fence->list_entry);
nv_dma_fence_signal(&nv_fence->base);
nv_dma_fence_put(&nv_fence->base);
dma_fence_signal(&nv_fence->base);
dma_fence_put(&nv_fence->base);
}
static void nv_drm_gem_prime_force_fence_signal(
@@ -289,18 +287,16 @@ __nv_drm_prime_fence_context_new(
}
/*
* nv_dma_fence_context_alloc() cannot fail, so we do not need
* dma_fence_context_alloc() cannot fail, so we do not need
* to check a return value.
*/
*nv_prime_fence_context = (struct nv_drm_prime_fence_context) {
.base.ops = &nv_drm_prime_fence_context_ops,
.base.nv_dev = nv_dev,
.base.context = nv_dma_fence_context_alloc(1),
.base.fenceSemIndex = p->index,
.pSemSurface = pSemSurface,
.pLinearAddress = pLinearAddress,
};
nv_prime_fence_context->base.ops = &nv_drm_prime_fence_context_ops;
nv_prime_fence_context->base.nv_dev = nv_dev;
nv_prime_fence_context->base.context = dma_fence_context_alloc(1);
nv_prime_fence_context->base.fenceSemIndex = p->index;
nv_prime_fence_context->pSemSurface = pSemSurface;
nv_prime_fence_context->pLinearAddress = pLinearAddress;
INIT_LIST_HEAD(&nv_prime_fence_context->pending);
@@ -345,7 +341,7 @@ failed:
return NULL;
}
static nv_dma_fence_t *__nv_drm_prime_fence_context_create_fence(
static struct dma_fence *__nv_drm_prime_fence_context_create_fence(
struct nv_drm_prime_fence_context *nv_prime_fence_context,
unsigned int seqno)
{
@@ -371,12 +367,12 @@ static nv_dma_fence_t *__nv_drm_prime_fence_context_create_fence(
spin_lock_init(&nv_fence->lock);
nv_dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
&nv_fence->lock, nv_prime_fence_context->base.context,
seqno);
dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
&nv_fence->lock, nv_prime_fence_context->base.context,
seqno);
/* The context maintains a reference to any pending fences. */
nv_dma_fence_get(&nv_fence->base);
dma_fence_get(&nv_fence->base);
list_add_tail(&nv_fence->list_entry, &nv_prime_fence_context->pending);
@@ -426,12 +422,11 @@ const struct nv_drm_gem_object_funcs nv_fence_context_gem_ops = {
static inline
struct nv_drm_fence_context *
__nv_drm_fence_context_lookup(
struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
nv_drm_gem_object_lookup(dev, filp, handle);
nv_drm_gem_object_lookup(filp, handle);
if (nv_gem != NULL && nv_gem->ops != &nv_fence_context_gem_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);
@@ -465,10 +460,15 @@ int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev,
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_prime_fence_context_create_params *p = data;
struct nv_drm_prime_fence_context *nv_prime_fence_context =
__nv_drm_prime_fence_context_new(nv_dev, p);
struct nv_drm_prime_fence_context *nv_prime_fence_context;
int err;
if (nv_dev->pDevice == NULL) {
return -EOPNOTSUPP;
}
nv_prime_fence_context = __nv_drm_prime_fence_context_new(nv_dev, p);
if (!nv_prime_fence_context) {
goto done;
}
@@ -487,6 +487,31 @@ done:
return -ENOMEM;
}
static int __nv_drm_gem_attach_fence(struct nv_drm_gem_object *nv_gem,
struct dma_fence *fence,
bool shared)
{
nv_dma_resv_t *resv = nv_drm_gem_res_obj(nv_gem);
int ret;
nv_dma_resv_lock(resv, NULL);
ret = nv_dma_resv_reserve_fences(resv, 1, shared);
if (ret == 0) {
if (shared) {
nv_dma_resv_add_shared_fence(resv, fence);
} else {
nv_dma_resv_add_excl_fence(resv, fence);
}
} else {
NV_DRM_LOG_ERR("Failed to reserve fence. Error code: %d", ret);
}
nv_dma_resv_unlock(resv);
return ret;
}
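/*
* Note: dma_resv requires fence slots to be reserved, under the resv lock,
* before a fence may be added, which is why the reserve and add calls above
* are paired inside a single lock/unlock section and a failed reservation is
* reported without attempting the add.
*/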
int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep)
{
@@ -496,16 +521,19 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
struct nv_drm_gem_object *nv_gem;
struct nv_drm_fence_context *nv_fence_context;
struct dma_fence *fence;
nv_dma_fence_t *fence;
nv_dma_resv_t *resv;
if (nv_dev->pDevice == NULL) {
ret = -EOPNOTSUPP;
goto done;
}
if (p->__pad != 0) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
goto done;
}
nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);
nv_gem = nv_drm_gem_object_lookup(filep, p->handle);
if (!nv_gem) {
NV_DRM_DEV_LOG_ERR(
@@ -517,7 +545,6 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
}
if((nv_fence_context = __nv_drm_fence_context_lookup(
nv_dev->dev,
filep,
p->fence_context_handle)) == NULL) {
@@ -554,23 +581,9 @@ int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
goto fence_context_create_fence_failed;
}
resv = nv_drm_gem_res_obj(nv_gem);
ret = __nv_drm_gem_attach_fence(nv_gem, fence, true /* exclusive */);
nv_dma_resv_lock(resv, NULL);
ret = nv_dma_resv_reserve_fences(resv, 1, false);
if (ret == 0) {
nv_dma_resv_add_excl_fence(resv, fence);
} else {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to reserve fence. Error code: %d", ret);
}
nv_dma_resv_unlock(resv);
/* dma_resv_add_excl_fence takes its own reference to the fence. */
nv_dma_fence_put(fence);
dma_fence_put(fence);
fence_context_create_fence_failed:
nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
@@ -583,7 +596,7 @@ done:
}
struct nv_drm_semsurf_fence {
nv_dma_fence_t base;
struct dma_fence base;
spinlock_t lock;
/*
@@ -611,7 +624,7 @@ struct nv_drm_semsurf_fence_callback {
};
struct nv_drm_sync_fd_wait_data {
nv_dma_fence_cb_t dma_fence_cb;
struct dma_fence_cb dma_fence_cb;
struct nv_drm_semsurf_fence_ctx *ctx;
nv_drm_work work; /* Deferred second half of fence wait callback */
@@ -742,15 +755,15 @@ __nv_drm_semsurf_force_complete_pending(struct nv_drm_semsurf_fence_ctx *ctx)
&ctx->pending_fences,
typeof(*nv_fence),
pending_node);
nv_dma_fence_t *fence = &nv_fence->base;
struct dma_fence *fence = &nv_fence->base;
list_del(&nv_fence->pending_node);
nv_dma_fence_set_error(fence, -ETIMEDOUT);
nv_dma_fence_signal(fence);
dma_fence_set_error(fence, -ETIMEDOUT);
dma_fence_signal(fence);
/* Remove the pending list's reference */
nv_dma_fence_put(fence);
dma_fence_put(fence);
}
/*
@@ -807,7 +820,7 @@ __nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
struct list_head finished;
struct list_head timed_out;
struct nv_drm_semsurf_fence *nv_fence;
nv_dma_fence_t *fence;
struct dma_fence *fence;
NvU64 currentSeqno = __nv_drm_get_semsurf_ctx_seqno(ctx);
NvU64 fenceSeqno = 0;
unsigned long flags;
@@ -871,8 +884,8 @@ __nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
nv_fence = list_first_entry(&finished, typeof(*nv_fence), pending_node);
list_del_init(&nv_fence->pending_node);
fence = &nv_fence->base;
nv_dma_fence_signal(fence);
nv_dma_fence_put(fence); /* Drops the pending list's reference */
dma_fence_signal(fence);
dma_fence_put(fence); /* Drops the pending list's reference */
}
while (!list_empty(&timed_out)) {
@@ -880,9 +893,9 @@ __nv_drm_semsurf_ctx_process_completed(struct nv_drm_semsurf_fence_ctx *ctx,
pending_node);
list_del_init(&nv_fence->pending_node);
fence = &nv_fence->base;
nv_dma_fence_set_error(fence, -ETIMEDOUT);
nv_dma_fence_signal(fence);
nv_dma_fence_put(fence); /* Drops the pending list's reference */
dma_fence_set_error(fence, -ETIMEDOUT);
dma_fence_signal(fence);
dma_fence_put(fence); /* Drops the pending list's reference */
}
}
@@ -1115,7 +1128,7 @@ static void __nv_drm_semsurf_fence_ctx_destroy(
*/
nv_drm_workthread_shutdown(&ctx->worker);
nv_drm_del_timer_sync(&ctx->timer);
nv_timer_delete_sync(&ctx->timer.kernel_timer);
/*
* The semaphore surface could still be sending callbacks, so it is still
@@ -1248,22 +1261,20 @@ __nv_drm_semsurf_fence_ctx_new(
}
/*
* nv_dma_fence_context_alloc() cannot fail, so we do not need
* dma_fence_context_alloc() cannot fail, so we do not need
* to check a return value.
*/
*ctx = (struct nv_drm_semsurf_fence_ctx) {
.base.ops = &nv_drm_semsurf_fence_ctx_ops,
.base.nv_dev = nv_dev,
.base.context = nv_dma_fence_context_alloc(1),
.base.fenceSemIndex = p->index,
.pSemSurface = pSemSurface,
.pSemMapping.pVoid = semMapping,
.pMaxSubmittedMapping = (volatile NvU64 *)maxSubmittedMapping,
.callback.local = NULL,
.callback.nvKms = NULL,
.current_wait_value = 0,
};
ctx->base.ops = &nv_drm_semsurf_fence_ctx_ops;
ctx->base.nv_dev = nv_dev;
ctx->base.context = dma_fence_context_alloc(1);
ctx->base.fenceSemIndex = p->index;
ctx->pSemSurface = pSemSurface;
ctx->pSemMapping.pVoid = semMapping;
ctx->pMaxSubmittedMapping = (volatile NvU64 *)maxSubmittedMapping;
ctx->callback.local = NULL;
ctx->callback.nvKms = NULL;
ctx->current_wait_value = 0;
spin_lock_init(&ctx->lock);
INIT_LIST_HEAD(&ctx->pending_fences);
@@ -1303,6 +1314,10 @@ int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
struct nv_drm_semsurf_fence_ctx *ctx;
int err;
if (nv_dev->pDevice == NULL) {
return -EOPNOTSUPP;
}
if (p->__pad != 0) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
return -EINVAL;
@@ -1324,26 +1339,26 @@ int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
}
static inline struct nv_drm_semsurf_fence*
to_nv_drm_semsurf_fence(nv_dma_fence_t *fence)
to_nv_drm_semsurf_fence(struct dma_fence *fence)
{
return container_of(fence, struct nv_drm_semsurf_fence, base);
}
static const char*
__nv_drm_semsurf_fence_op_get_timeline_name(nv_dma_fence_t *fence)
__nv_drm_semsurf_fence_op_get_timeline_name(struct dma_fence *fence)
{
return "nvidia.semaphore_surface";
}
static bool
__nv_drm_semsurf_fence_op_enable_signaling(nv_dma_fence_t *fence)
__nv_drm_semsurf_fence_op_enable_signaling(struct dma_fence *fence)
{
// DO NOTHING - Could defer RM callback registration until this point
return true;
}
static void
__nv_drm_semsurf_fence_op_release(nv_dma_fence_t *fence)
__nv_drm_semsurf_fence_op_release(struct dma_fence *fence)
{
struct nv_drm_semsurf_fence *nv_fence =
to_nv_drm_semsurf_fence(fence);
@@ -1351,11 +1366,12 @@ __nv_drm_semsurf_fence_op_release(nv_dma_fence_t *fence)
nv_drm_free(nv_fence);
}
static const nv_dma_fence_ops_t nv_drm_semsurf_fence_ops = {
static const struct dma_fence_ops nv_drm_semsurf_fence_ops = {
.get_driver_name = nv_drm_gem_fence_op_get_driver_name,
.get_timeline_name = __nv_drm_semsurf_fence_op_get_timeline_name,
.enable_signaling = __nv_drm_semsurf_fence_op_enable_signaling,
.release = __nv_drm_semsurf_fence_op_release,
.wait = dma_fence_default_wait,
#if defined(NV_DMA_FENCE_OPS_HAS_USE_64BIT_SEQNO)
.use_64bit_seqno = true,
#endif
@@ -1381,7 +1397,7 @@ __nv_drm_semsurf_ctx_add_pending(struct nv_drm_semsurf_fence_ctx *ctx,
}
/* Add a reference to the fence for the list */
nv_dma_fence_get(&nv_fence->base);
dma_fence_get(&nv_fence->base);
INIT_LIST_HEAD(&nv_fence->pending_node);
nv_fence->timeout = nv_drm_timeout_from_ms(timeoutMS);
@@ -1414,16 +1430,21 @@ __nv_drm_semsurf_ctx_add_pending(struct nv_drm_semsurf_fence_ctx *ctx,
__nv_drm_semsurf_ctx_reg_callbacks(ctx);
}
static nv_dma_fence_t *__nv_drm_semsurf_fence_ctx_create_fence(
static struct dma_fence *__nv_drm_semsurf_fence_ctx_create_fence(
struct nv_drm_device *nv_dev,
struct nv_drm_semsurf_fence_ctx *ctx,
NvU64 wait_value,
NvU64 timeout_value_ms)
{
struct nv_drm_semsurf_fence *nv_fence;
nv_dma_fence_t *fence;
struct dma_fence *fence;
int ret = 0;
if (timeout_value_ms == 0 ||
timeout_value_ms > NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS) {
timeout_value_ms = NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS;
}
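/*
* Clamping here, in the common fence constructor, means both callers (the
* sync-FD create ioctl and the resv attach ioctl) get identical behavior:
* a timeout of 0, or one beyond the 5000 ms maximum, falls back to the
* maximum.
*/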
if ((nv_fence = nv_drm_calloc(1, sizeof(*nv_fence))) == NULL) {
ret = -ENOMEM;
goto out;
@@ -1436,9 +1457,9 @@ static nv_dma_fence_t *__nv_drm_semsurf_fence_ctx_create_fence(
#endif
/* Initializes the fence with one reference (for the caller) */
nv_dma_fence_init(fence, &nv_drm_semsurf_fence_ops,
&nv_fence->lock,
ctx->base.context, wait_value);
dma_fence_init(fence, &nv_drm_semsurf_fence_ops,
&nv_fence->lock,
ctx->base.context, wait_value);
__nv_drm_semsurf_ctx_add_pending(ctx, nv_fence, timeout_value_ms);
@@ -1454,18 +1475,21 @@ int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_semsurf_fence_create_params *p = data;
struct nv_drm_fence_context *nv_fence_context;
nv_dma_fence_t *fence;
uint32_t timeout = NV_DRM_SEMAPHORE_SURFACE_FENCE_MAX_TIMEOUT_MS;
struct dma_fence *fence;
int ret = -EINVAL;
int fd;
if (nv_dev->pDevice == NULL) {
ret = -EOPNOTSUPP;
goto done;
}
if (p->__pad != 0) {
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
goto done;
}
if ((nv_fence_context = __nv_drm_fence_context_lookup(
nv_dev->dev,
filep,
p->fence_context_handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(
@@ -1485,16 +1509,11 @@ int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
goto fence_context_create_fence_failed;
}
if ((p->timeout_value_ms != 0) &&
(p->timeout_value_ms < timeout)) {
timeout = p->timeout_value_ms;
}
fence = __nv_drm_semsurf_fence_ctx_create_fence(
nv_dev,
to_semsurf_fence_ctx(nv_fence_context),
p->wait_value,
timeout);
p->timeout_value_ms);
if (IS_ERR(fence)) {
ret = PTR_ERR(fence);
@@ -1526,7 +1545,7 @@ fence_context_create_sync_failed:
* FD will still hold a reference, and the pending list (if the fence hasn't
* already been signaled) will also retain a reference.
*/
nv_dma_fence_put(fence);
dma_fence_put(fence);
fence_context_create_fence_failed:
nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
@@ -1584,8 +1603,8 @@ __nv_drm_semsurf_wait_fence_work_cb
static void
__nv_drm_semsurf_wait_fence_cb
(
nv_dma_fence_t *fence,
nv_dma_fence_cb_t *cb
struct dma_fence *fence,
struct dma_fence_cb *cb
)
{
struct nv_drm_sync_fd_wait_data *wait_data =
@@ -1610,7 +1629,7 @@ __nv_drm_semsurf_wait_fence_cb
}
/* Don't need to reference the fence anymore, just the fence context. */
nv_dma_fence_put(fence);
dma_fence_put(fence);
}
int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
@@ -1622,20 +1641,23 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
struct nv_drm_fence_context *nv_fence_context;
struct nv_drm_semsurf_fence_ctx *ctx;
struct nv_drm_sync_fd_wait_data *wait_data = NULL;
nv_dma_fence_t *fence;
struct dma_fence *fence;
unsigned long flags;
int ret = -EINVAL;
if (nv_dev->pDevice == NULL) {
return -EOPNOTSUPP;
}
if (p->pre_wait_value >= p->post_wait_value) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Non-monotonic wait values specified to fence wait: 0x%llu, 0x%llu",
"Non-monotonic wait values specified to fence wait: 0x%" NvU64_fmtu ", 0x%" NvU64_fmtu,
p->pre_wait_value, p->post_wait_value);
goto done;
}
if ((nv_fence_context = __nv_drm_fence_context_lookup(
nv_dev->dev,
filep,
p->fence_context_handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(
@@ -1688,9 +1710,9 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
list_add(&wait_data->pending_node, &ctx->pending_waits);
spin_unlock_irqrestore(&ctx->lock, flags);
ret = nv_dma_fence_add_callback(fence,
&wait_data->dma_fence_cb,
__nv_drm_semsurf_wait_fence_cb);
ret = dma_fence_add_callback(fence,
&wait_data->dma_fence_cb,
__nv_drm_semsurf_wait_fence_cb);
if (ret) {
if (ret == -ENOENT) {
@@ -1702,7 +1724,7 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
}
/* Execute second half of wait immediately, avoiding the worker thread */
nv_dma_fence_put(fence);
dma_fence_put(fence);
__nv_drm_semsurf_wait_fence_work_cb(wait_data);
}
@@ -1723,6 +1745,85 @@ done:
return 0;
}
#endif /* NV_DRM_FENCE_AVAILABLE */
int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep)
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
struct drm_nvidia_semsurf_fence_attach_params *p = data;
struct nv_drm_gem_object *nv_gem = NULL;
struct nv_drm_fence_context *nv_fence_context = NULL;
struct dma_fence *fence;
int ret = -EINVAL;
if (nv_dev->pDevice == NULL) {
ret = -EOPNOTSUPP;
goto done;
}
nv_gem = nv_drm_gem_object_lookup(filep, p->handle);
if (!nv_gem) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to lookup gem object for fence attach: 0x%08x",
p->handle);
goto done;
}
nv_fence_context = __nv_drm_fence_context_lookup(
filep,
p->fence_context_handle);
if (!nv_fence_context) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to lookup gem object for fence context: 0x%08x",
p->fence_context_handle);
goto done;
}
if (nv_fence_context->ops != &nv_drm_semsurf_fence_ctx_ops) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Wrong fence context type: 0x%08x",
p->fence_context_handle);
goto done;
}
fence = __nv_drm_semsurf_fence_ctx_create_fence(
nv_dev,
to_semsurf_fence_ctx(nv_fence_context),
p->wait_value,
p->timeout_value_ms);
if (IS_ERR(fence)) {
ret = PTR_ERR(fence);
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to allocate fence: 0x%08x", p->handle);
goto done;
}
ret = __nv_drm_gem_attach_fence(nv_gem, fence, p->shared);
dma_fence_put(fence);
done:
if (nv_fence_context) {
nv_drm_gem_object_unreference_unlocked(&nv_fence_context->base);
}
if (nv_gem) {
nv_drm_gem_object_unreference_unlocked(nv_gem);
}
return ret;
}
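/*
* Userspace usage sketch for the ioctl above (the request macro name is
* illustrative only; the real define lives in nvidia-drm-ioctl.h):
*
* struct drm_nvidia_semsurf_fence_attach_params p = {
* .handle = gem_handle, // GEM object to attach to
* .fence_context_handle = ctx_handle, // semsurf fence context
* .wait_value = value, // semaphore value being awaited
* .timeout_value_ms = 0, // 0 selects the 5000 ms default
* .shared = true, // attach as a shared (read) fence
* };
* drmIoctl(fd, DRM_IOCTL_NVIDIA_SEMSURF_FENCE_ATTACH, &p);
*/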
#endif /* NV_DRM_AVAILABLE */


@@ -30,8 +30,6 @@
struct drm_file;
struct drm_device;
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_fence_supported_ioctl(struct drm_device *dev,
void *data, struct drm_file *filep);
@@ -53,7 +51,9 @@ int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
#endif /* NV_DRM_FENCE_AVAILABLE */
int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
void *data,
struct drm_file *filep);
#endif /* NV_DRM_AVAILABLE */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2025, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,15 +35,22 @@
static const u32 nvkms_to_drm_format[] = {
/* RGB formats */
[NvKmsSurfaceMemoryFormatA1R5G5B5] = DRM_FORMAT_ARGB1555,
[NvKmsSurfaceMemoryFormatX1R5G5B5] = DRM_FORMAT_XRGB1555,
[NvKmsSurfaceMemoryFormatR5G6B5] = DRM_FORMAT_RGB565,
[NvKmsSurfaceMemoryFormatA8R8G8B8] = DRM_FORMAT_ARGB8888,
[NvKmsSurfaceMemoryFormatX8R8G8B8] = DRM_FORMAT_XRGB8888,
[NvKmsSurfaceMemoryFormatX8B8G8R8] = DRM_FORMAT_XBGR8888,
[NvKmsSurfaceMemoryFormatA2B10G10R10] = DRM_FORMAT_ABGR2101010,
[NvKmsSurfaceMemoryFormatX2B10G10R10] = DRM_FORMAT_XBGR2101010,
[NvKmsSurfaceMemoryFormatA8B8G8R8] = DRM_FORMAT_ABGR8888,
[NvKmsSurfaceMemoryFormatA1R5G5B5] = DRM_FORMAT_ARGB1555,
[NvKmsSurfaceMemoryFormatX1R5G5B5] = DRM_FORMAT_XRGB1555,
[NvKmsSurfaceMemoryFormatR5G6B5] = DRM_FORMAT_RGB565,
[NvKmsSurfaceMemoryFormatA8R8G8B8] = DRM_FORMAT_ARGB8888,
[NvKmsSurfaceMemoryFormatX8R8G8B8] = DRM_FORMAT_XRGB8888,
[NvKmsSurfaceMemoryFormatX8B8G8R8] = DRM_FORMAT_XBGR8888,
[NvKmsSurfaceMemoryFormatA2B10G10R10] = DRM_FORMAT_ABGR2101010,
[NvKmsSurfaceMemoryFormatX2B10G10R10] = DRM_FORMAT_XBGR2101010,
[NvKmsSurfaceMemoryFormatA8B8G8R8] = DRM_FORMAT_ABGR8888,
#if defined(DRM_FORMAT_ABGR16161616)
/*
* DRM_FORMAT_ABGR16161616 was introduced by Linux kernel commit
* ff92ecf575a92 (v5.14).
*/
[NvKmsSurfaceMemoryFormatR16G16B16A16] = DRM_FORMAT_ABGR16161616,
#endif
#if defined(DRM_FORMAT_ABGR16161616F)
[NvKmsSurfaceMemoryFormatRF16GF16BF16AF16] = DRM_FORMAT_ABGR16161616F,
#endif
@@ -166,4 +173,37 @@ uint32_t *nv_drm_format_array_alloc(
return array;
}
bool nv_drm_format_is_yuv(u32 format)
{
#if defined(NV_DRM_FORMAT_INFO_HAS_IS_YUV)
const struct drm_format_info *format_info = drm_format_info(format);
return (format_info != NULL) && format_info->is_yuv;
#else
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
#if defined(DRM_FORMAT_P210)
case DRM_FORMAT_P210:
#endif
#if defined(DRM_FORMAT_P010)
case DRM_FORMAT_P010:
#endif
#if defined(DRM_FORMAT_P012)
case DRM_FORMAT_P012:
#endif
return true;
default:
return false;
}
#endif
}
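/*
* Example: nv_drm_format_is_yuv(DRM_FORMAT_NV12) is true while
* nv_drm_format_is_yuv(DRM_FORMAT_XRGB8888) is false; on kernels whose
* struct drm_format_info has an is_yuv member the answer comes from DRM's
* own format table, otherwise from the explicit switch above.
*/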
#endif


@@ -38,6 +38,8 @@ uint32_t *nv_drm_format_array_alloc(
unsigned int *count,
const long unsigned int nvkms_format_mask);
bool nv_drm_format_is_yuv(u32 format);
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
#endif /* __NVIDIA_DRM_FORMAT_H__ */


@@ -24,17 +24,13 @@
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-ioctl.h"
@@ -71,12 +67,42 @@ static int __nv_drm_gem_dma_buf_create_mmap_offset(
static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma)
{
#if defined(NV_LINUX)
struct dma_buf_attachment *attach = nv_gem->base.import_attach;
struct dma_buf *dma_buf = attach->dmabuf;
#endif
struct file *old_file;
int ret;
/* check if buffer supports mmap */
#if defined(NV_BSD)
/*
* Most of the FreeBSD DRM code refers to struct file*, which is actually
* a struct linux_file*. The dmabuf code in FreeBSD is not actually plumbed
* through the same linuxkpi bits it seems (probably so it can be used
* elsewhere), so dma_buf->file really is a native FreeBSD struct file...
*/
if (!nv_gem->base.filp->f_op->mmap)
return -EINVAL;
/* readjust the vma */
get_file(nv_gem->base.filp);
old_file = vma->vm_file;
vma->vm_file = nv_gem->base.filp;
vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);
ret = nv_gem->base.filp->f_op->mmap(nv_gem->base.filp, vma);
if (ret) {
/* restore old parameters on failure */
vma->vm_file = old_file;
vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node);
fput(nv_gem->base.filp);
} else {
if (old_file)
fput(old_file);
}
#else
if (!dma_buf->file->f_op->mmap)
return -EINVAL;
@@ -84,18 +110,20 @@ static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
get_file(dma_buf->file);
old_file = vma->vm_file;
vma->vm_file = dma_buf->file;
vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);;
vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);
ret = dma_buf->file->f_op->mmap(dma_buf->file, vma);
if (ret) {
/* restore old parameters on failure */
vma->vm_file = old_file;
vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node);
fput(dma_buf->file);
} else {
if (old_file)
fput(old_file);
}
#endif
return ret;
}
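/*
* The vm_file/vm_pgoff juggling above follows the same pattern as the
* kernel's dma_buf_mmap() helper: faults are redirected to the exporter by
* pointing the VMA at the dma-buf's file, so the file references must be
* adjusted by hand and restored if the exporter's mmap fails.
*/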
@@ -162,7 +190,7 @@ int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
}
if ((nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(
dev, filep, p->handle)) == NULL) {
filep, p->handle)) == NULL) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(
nv_dev,


@@ -48,12 +48,11 @@ static inline struct nv_drm_gem_dma_buf *to_nv_dma_buf(
static inline
struct nv_drm_gem_dma_buf *nv_drm_gem_object_dma_buf_lookup(
struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
nv_drm_gem_object_lookup(dev, filp, handle);
nv_drm_gem_object_lookup(filp, handle);
if (nv_gem != NULL && nv_gem->ops != &__nv_gem_dma_buf_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);


@@ -28,17 +28,13 @@
#include "nvidia-drm-helper.h"
#include "nvidia-drm-ioctl.h"
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#include <linux/io.h>
#include "nv-mm.h"
#if defined(NV_BSD)
#include <vm/vm_pageout.h>
#endif
static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem)
{
@@ -68,9 +64,20 @@ static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem)
nv_drm_free(nv_nvkms_memory);
}
static int __nv_drm_gem_nvkms_map(
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory);
static int __nv_drm_gem_nvkms_mmap(struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma)
{
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
if (ret) {
return ret;
}
return drm_gem_mmap_obj(&nv_gem->base,
drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
}
@@ -83,7 +90,7 @@ static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
unsigned long address = nv_page_fault_va(vmf);
unsigned long address = vmf->address;
struct drm_gem_object *gem = vma->vm_private_data;
unsigned long page_offset, pfn;
vm_fault_t ret;
@@ -93,7 +100,17 @@ static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
if (nv_nvkms_memory->pages_count == 0) {
pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress;
pfn >>= PAGE_SHIFT;
#if defined(NV_LINUX)
/*
* FreeBSD doesn't set pgoff. We instead have pfn be the base physical
* address, and we will calculate the index pidx from the virtual address.
*
* This only works because linux_cdev_pager_populate passes the pidx as
* vmf->virtual_address. Then we turn the virtual address
* into a physical page number.
*/
pfn += page_offset;
#endif
} else {
BUG_ON(page_offset >= nv_nvkms_memory->pages_count);
pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]);
@@ -133,11 +150,18 @@ static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
static int __nv_drm_gem_nvkms_map(
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory)
{
int ret = 0;
struct nv_drm_device *nv_dev = nv_nvkms_memory->base.nv_dev;
struct NvKmsKapiMemory *pMemory = nv_nvkms_memory->base.pMemory;
if (!nv_dev->hasVideoMemory) {
return 0;
mutex_lock(&nv_nvkms_memory->map_lock);
if (nv_nvkms_memory->physically_mapped) {
goto done;
}
if (!nvKms->isVidmem(pMemory)) {
goto done;
}
if (!nvKms->mapMemory(nv_dev->pDevice,
@@ -148,7 +172,8 @@ static int __nv_drm_gem_nvkms_map(
nv_dev,
"Failed to map NvKmsKapiMemory 0x%p",
pMemory);
return -ENOMEM;
ret = -ENOMEM;
goto done;
}
nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc(
@@ -164,7 +189,9 @@ static int __nv_drm_gem_nvkms_map(
nv_nvkms_memory->physically_mapped = true;
return 0;
done:
mutex_unlock(&nv_nvkms_memory->map_lock);
return ret;
}
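/*
* Readers such as __nv_drm_gem_nvkms_prime_vmap() may test physically_mapped
* without taking map_lock (see the struct comment): this function is the
* only writer, it publishes pPhysicalAddress and
* pWriteCombinedIORemapAddress before setting physically_mapped, and it
* re-checks the flag under the lock so concurrent mappers serialize here.
*/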
static void *__nv_drm_gem_nvkms_prime_vmap(
@@ -173,14 +200,40 @@ static void *__nv_drm_gem_nvkms_prime_vmap(
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
if (!nv_nvkms_memory->physically_mapped) {
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
if (ret) {
return ERR_PTR(ret);
}
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
if (ret) {
return ERR_PTR(ret);
}
return nv_nvkms_memory->pWriteCombinedIORemapAddress;
if (nv_nvkms_memory->physically_mapped) {
return nv_nvkms_memory->pWriteCombinedIORemapAddress;
}
/*
* If this buffer isn't physically mapped, it might be backed by struct
* pages. Use vmap in that case. Do a noncached mapping for system memory
* as display is non io-coherent device in case of Tegra.
*/
if (nv_nvkms_memory->pages_count > 0) {
return nv_drm_vmap(nv_nvkms_memory->pages,
nv_nvkms_memory->pages_count,
false);
}
return ERR_PTR(-ENOMEM);
}
static void __nv_drm_gem_nvkms_prime_vunmap(
struct nv_drm_gem_object *nv_gem,
void *address)
{
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
if (!nv_nvkms_memory->physically_mapped &&
nv_nvkms_memory->pages_count > 0) {
nv_drm_vunmap(address);
}
}
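/*
* Only the page-backed nv_drm_vmap() mapping created in the fallback path
* above is torn down here; the write-combined BAR mapping established by
* __nv_drm_gem_nvkms_map() stays live for the lifetime of the object and is
* released when the object itself is freed.
*/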
static int __nv_drm_gem_map_nvkms_memory_offset(
@@ -188,17 +241,7 @@ static int __nv_drm_gem_map_nvkms_memory_offset(
struct nv_drm_gem_object *nv_gem,
uint64_t *offset)
{
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
to_nv_nvkms_memory(nv_gem);
if (!nv_nvkms_memory->physically_mapped) {
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
if (ret) {
return ret;
}
}
return nv_drm_gem_create_mmap_offset(&nv_nvkms_memory->base, offset);
return nv_drm_gem_create_mmap_offset(nv_gem, offset);
}
static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
@@ -210,7 +253,7 @@ static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
struct sg_table *sg_table;
if (nv_nvkms_memory->pages_count == 0) {
NV_DRM_DEV_LOG_ERR(
NV_DRM_DEV_DEBUG_DRIVER(
nv_dev,
"Cannot create sg_table for NvKmsKapiMemory 0x%p",
nv_gem->pMemory);
@@ -228,6 +271,7 @@ const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = {
.free = __nv_drm_gem_nvkms_memory_free,
.prime_dup = __nv_drm_gem_nvkms_prime_dup,
.prime_vmap = __nv_drm_gem_nvkms_prime_vmap,
.prime_vunmap = __nv_drm_gem_nvkms_prime_vunmap,
.mmap = __nv_drm_gem_nvkms_mmap,
.handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault,
.create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset,
@@ -243,16 +287,26 @@ static int __nv_drm_nvkms_gem_obj_init(
NvU64 *pages = NULL;
NvU32 numPages = 0;
if ((size % PAGE_SIZE) != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"NvKmsKapiMemory 0x%p size should be in a multiple of page size to "
"create a gem object",
pMemory);
return -EINVAL;
}
mutex_init(&nv_nvkms_memory->map_lock);
nv_nvkms_memory->pPhysicalAddress = NULL;
nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
nv_nvkms_memory->physically_mapped = false;
if (!nvKms->getMemoryPages(nv_dev->pDevice,
if (!nvKms->isVidmem(pMemory) &&
!nvKms->getMemoryPages(nv_dev->pDevice,
pMemory,
&pages,
&numPages) &&
!nv_dev->hasVideoMemory) {
/* GetMemoryPages may fail for vidmem allocations,
&numPages)) {
/* GetMemoryPages will fail for vidmem allocations,
* but it should not fail for sysmem allocations. */
NV_DRM_DEV_LOG_ERR(nv_dev,
"Failed to get memory pages for NvKmsKapiMemory 0x%p",
@@ -279,6 +333,7 @@ int nv_drm_dumb_create(
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
uint8_t compressible = 0;
struct NvKmsKapiMemory *pMemory;
struct NvKmsKapiAllocateMemoryParams allocParams = { };
int ret = 0;
args->pitch = roundup(args->width * ((args->bpp + 7) >> 3),
@@ -296,25 +351,19 @@ int nv_drm_dumb_create(
goto fail;
}
if (nv_dev->hasVideoMemory) {
pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
args->size,
&compressible);
} else {
pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
NvKmsSurfaceMemoryLayoutPitch,
NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
args->size,
&compressible);
}
allocParams.layout = NvKmsSurfaceMemoryLayoutPitch;
allocParams.type = NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT;
allocParams.size = args->size;
allocParams.noDisplayCaching = true;
allocParams.useVideoMemory = nv_dev->hasVideoMemory;
allocParams.compressible = &compressible;
pMemory = nvKms->allocateMemory(nv_dev->pDevice, &allocParams);
if (pMemory == NULL) {
ret = -ENOMEM;
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to allocate NvKmsKapiMemory for dumb object of size %llu",
"Failed to allocate NvKmsKapiMemory for dumb object of size %" NvU64_fmtu,
args->size);
goto nvkms_alloc_memory_failed;
}
@@ -358,7 +407,7 @@ int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
ret = -EOPNOTSUPP;
goto failed;
}
@@ -408,7 +457,7 @@ int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
ret = -EOPNOTSUPP;
goto done;
}
@@ -419,7 +468,6 @@ int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
}
if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
dev,
filep,
p->handle)) == NULL) {
ret = -EINVAL;
@@ -456,12 +504,11 @@ int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
struct drm_nvidia_gem_alloc_nvkms_memory_params *p = data;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
struct NvKmsKapiMemory *pMemory;
enum NvKmsSurfaceMemoryLayout layout;
enum NvKmsKapiAllocationType type;
struct NvKmsKapiAllocateMemoryParams allocParams = { };
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = -EINVAL;
ret = -EOPNOTSUPP;
goto failed;
}
@@ -477,25 +524,15 @@ int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
goto failed;
}
layout = p->block_linear ?
allocParams.layout = p->block_linear ?
NvKmsSurfaceMemoryLayoutBlockLinear : NvKmsSurfaceMemoryLayoutPitch;
type = (p->flags & NV_GEM_ALLOC_NO_SCANOUT) ?
allocParams.type = (p->flags & NV_GEM_ALLOC_NO_SCANOUT) ?
NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN : NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT;
allocParams.size = p->memory_size;
allocParams.useVideoMemory = nv_dev->hasVideoMemory;
allocParams.compressible = &p->compressible;
if (nv_dev->hasVideoMemory) {
pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice,
layout,
type,
p->memory_size,
&p->compressible);
} else {
pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice,
layout,
type,
p->memory_size,
&p->compressible);
}
pMemory = nvKms->allocateMemory(nv_dev->pDevice, &allocParams);
if (pMemory == NULL) {
ret = -EINVAL;
NV_DRM_DEV_LOG_ERR(nv_dev,
@@ -529,14 +566,12 @@ static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
{
struct nv_drm_device *nv_dev = to_nv_device(dev);
const struct nv_drm_device *nv_dev_src;
const struct nv_drm_gem_nvkms_memory *nv_nvkms_memory_src;
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
struct NvKmsKapiMemory *pMemory;
BUG_ON(nv_gem_src == NULL || nv_gem_src->ops != &nv_gem_nvkms_memory_ops);
nv_dev_src = to_nv_device(nv_gem_src->base.dev);
nv_nvkms_memory_src = to_nv_nvkms_memory_const(nv_gem_src);
if ((nv_nvkms_memory =
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
@@ -579,7 +614,6 @@ int nv_drm_dumb_map_offset(struct drm_file *file,
int ret = -EINVAL;
if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
dev,
file,
handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(


@@ -32,8 +32,15 @@
struct nv_drm_gem_nvkms_memory {
struct nv_drm_gem_object base;
/*
* Lock to protect concurrent writes to physically_mapped, pPhysicalAddress,
* and pWriteCombinedIORemapAddress.
*
* __nv_drm_gem_nvkms_map(), the sole writer, is structured such that
* readers are not required to hold the lock.
*/
struct mutex map_lock;
bool physically_mapped;
void *pPhysicalAddress;
void *pWriteCombinedIORemapAddress;
@@ -65,12 +72,11 @@ static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory_const(
static inline
struct nv_drm_gem_nvkms_memory *nv_drm_gem_object_nvkms_memory_lookup(
struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
nv_drm_gem_object_lookup(dev, filp, handle);
nv_drm_gem_object_lookup(filp, handle);
if (nv_gem != NULL && nv_gem->ops != &nv_gem_nvkms_memory_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);


@@ -24,9 +24,7 @@
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#include "nvidia-drm-gem-user-memory.h"
#include "nvidia-drm-helper.h"
@@ -35,6 +33,11 @@
#include "linux/dma-buf.h"
#include "linux/mm.h"
#include "nv-mm.h"
#include "linux/pfn_t.h"
#if defined(NV_BSD)
#include <vm/vm_pageout.h>
#endif
static inline
void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem)
@@ -64,7 +67,8 @@ static void *__nv_drm_gem_user_memory_prime_vmap(
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
return nv_drm_vmap(nv_user_memory->pages,
nv_user_memory->pages_count);
nv_user_memory->pages_count,
true);
}
static void __nv_drm_gem_user_memory_prime_vunmap(
@@ -99,21 +103,16 @@ static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem,
return 0;
}
static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
struct nv_drm_gem_object *nv_gem,
#if defined(NV_LINUX) && !defined(NV_VMF_INSERT_MIXED_PRESENT)
static vm_fault_t __nv_vm_insert_mixed_helper(
struct vm_area_struct *vma,
struct vm_fault *vmf)
unsigned long address,
unsigned long pfn)
{
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
unsigned long address = nv_page_fault_va(vmf);
struct drm_gem_object *gem = vma->vm_private_data;
unsigned long page_offset;
vm_fault_t ret;
int ret;
page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
ret = vm_insert_mixed(vma, address, pfn_to_pfn_t(pfn));
BUG_ON(page_offset >= nv_user_memory->pages_count);
ret = vm_insert_page(vma, address, nv_user_memory->pages[page_offset]);
switch (ret) {
case 0:
case -EBUSY:
@@ -121,18 +120,38 @@ static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
* EBUSY indicates that another thread already handled
* the faulted range.
*/
ret = VM_FAULT_NOPAGE;
break;
return VM_FAULT_NOPAGE;
case -ENOMEM:
ret = VM_FAULT_OOM;
break;
return VM_FAULT_OOM;
default:
WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
ret = VM_FAULT_SIGBUS;
break;
return VM_FAULT_SIGBUS;
}
}
#endif
return ret;
static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
struct nv_drm_gem_object *nv_gem,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
unsigned long address = vmf->address;
struct drm_gem_object *gem = vma->vm_private_data;
unsigned long page_offset;
unsigned long pfn;
page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
BUG_ON(page_offset >= nv_user_memory->pages_count);
pfn = page_to_pfn(nv_user_memory->pages[page_offset]);
#if !defined(NV_LINUX)
return vmf_insert_pfn(vma, address, pfn);
#elif defined(NV_VMF_INSERT_MIXED_PRESENT)
return vmf_insert_mixed(vma, address, pfn_to_pfn_t(pfn));
#else
return __nv_vm_insert_mixed_helper(vma, address, pfn);
#endif
}
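/*
* The #if ladder above exists because vmf_insert_pfn() and
* vmf_insert_mixed() already return vm_fault_t codes, while the older
* vm_insert_mixed() returns an errno-style int;
* __nv_vm_insert_mixed_helper() only performs that errno-to-vm_fault_t
* translation.
*/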
static int __nv_drm_gem_user_create_mmap_offset(
@@ -170,7 +189,7 @@ int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
if ((params->size % PAGE_SIZE) != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Userspace memory 0x%llx size should be in a multiple of page "
"Userspace memory 0x%" NvU64_fmtx " size should be in a multiple of page "
"size to create a gem object",
params->address);
return -EINVAL;
@@ -183,7 +202,7 @@ int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
if (ret != 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to lock user pages for address 0x%llx: %d",
"Failed to lock user pages for address 0x%" NvU64_fmtx ": %d",
params->address, ret);
return ret;
}


@@ -52,12 +52,11 @@ int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
static inline
struct nv_drm_gem_user_memory *nv_drm_gem_object_user_memory_lookup(
struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
struct nv_drm_gem_object *nv_gem =
nv_drm_gem_object_lookup(dev, filp, handle);
nv_drm_gem_object_lookup(filp, handle);
if (nv_gem != NULL && nv_gem->ops != &__nv_gem_user_memory_ops) {
nv_drm_gem_object_unreference_unlocked(nv_gem);


@@ -35,16 +35,12 @@
#include "nvidia-drm-gem-dma-buf.h"
#include "nvidia-drm-gem-nvkms-memory.h"
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
#include <drm/drm_vma_manager.h>
#endif
#include "linux/dma-buf.h"
@@ -58,7 +54,7 @@ void nv_drm_gem_free(struct drm_gem_object *gem)
/* Cleanup core gem object */
drm_gem_object_release(&nv_gem->base);
#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_fini(&nv_gem->resv);
#endif
@@ -135,7 +131,7 @@ void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
/* Initialize the gem object */
#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_init(&nv_gem->resv);
#endif
@@ -144,12 +140,17 @@ void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
#endif
drm_gem_private_object_init(dev, &nv_gem->base, size);
/* Create mmap offset early for drm_gem_prime_mmap(), if possible. */
if (nv_gem->ops->create_mmap_offset) {
uint64_t offset;
nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, &offset);
}
}
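/*
* Creating the offset at init time makes the later map_offset ioctl an
* idempotent re-query and ensures drm_gem_prime_mmap() finds a valid
* vma_node when an exported object is mapped through PRIME.
*/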
struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
#if defined(NV_DMA_BUF_OWNER_PRESENT)
struct drm_gem_object *gem_dst;
struct nv_drm_gem_object *nv_gem_src;
@@ -166,11 +167,13 @@ struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
*/
gem_dst = nv_gem_src->ops->prime_dup(dev, nv_gem_src);
if (gem_dst)
return gem_dst;
if (gem_dst == NULL) {
return ERR_PTR(-ENOTSUPP);
}
return gem_dst;
}
}
#endif /* NV_DMA_BUF_OWNER_PRESENT */
return drm_gem_prime_import(dev, dma_buf);
}
@@ -222,8 +225,7 @@ int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
struct nv_drm_gem_object *nv_gem;
int ret;
if ((nv_gem = nv_drm_gem_object_lookup(dev,
filep,
if ((nv_gem = nv_drm_gem_object_lookup(filep,
params->handle)) == NULL) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
@@ -232,6 +234,7 @@ int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
return -EINVAL;
}
/* mmap offset creation is idempotent, fetch it by creating it again. */
if (nv_gem->ops->create_mmap_offset) {
ret = nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, &params->offset);
} else {
@@ -258,8 +261,8 @@ int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
struct nv_drm_gem_object *nv_gem;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = nv_drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff, vma_pages(vma));
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff, vma_pages(vma));
if (likely(node)) {
obj = container_of(node, struct drm_gem_object, vma_node);
/*
@@ -285,7 +288,7 @@ int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
goto done;
}
if (!nv_drm_vma_node_is_allowed(node, file)) {
if (!drm_vma_node_is_allowed(node, file->private_data)) {
ret = -EACCES;
goto done;
}
@@ -319,10 +322,10 @@ int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
struct nv_drm_gem_object *nv_gem = NULL;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
return -EINVAL;
return -EOPNOTSUPP;
}
nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(dev, filep, p->handle);
nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(filep, p->handle);
if (nv_dma_buf) {
p->object_type = NV_GEM_OBJECT_DMABUF;
nv_gem = &nv_dma_buf->base;
@@ -330,7 +333,7 @@ int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
}
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(dev, filep, p->handle);
nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(filep, p->handle);
if (nv_nvkms_memory) {
p->object_type = NV_GEM_OBJECT_NVKMS;
nv_gem = &nv_nvkms_memory->base;
@@ -338,7 +341,7 @@ int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
}
#endif
nv_user_memory = nv_drm_gem_object_user_memory_lookup(dev, filep, p->handle);
nv_user_memory = nv_drm_gem_object_user_memory_lookup(filep, p->handle);
if (nv_user_memory) {
p->object_type = NV_GEM_OBJECT_USERMEMORY;
nv_gem = &nv_user_memory->base;


@@ -33,17 +33,12 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif
#include "nvkms-kapi.h"
#include "nv-mm.h"
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#include "nvidia-dma-resv-helper.h"
#endif
#include "linux/dma-buf.h"
@@ -73,7 +68,7 @@ struct nv_drm_gem_object {
struct NvKmsKapiMemory *pMemory;
#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
nv_dma_resv_t resv;
#endif
};
@@ -88,47 +83,14 @@ static inline struct nv_drm_gem_object *to_nv_gem_object(
return NULL;
}
/*
* drm_gem_object_{get/put}() added by commit
* e6b62714e87c8811d5564b6a0738dcde63a51774 (2017-02-28) and
* drm_gem_object_{reference/unreference}() removed by commit
* 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15).
*/
static inline void
nv_drm_gem_object_reference(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
drm_gem_object_get(&nv_gem->base);
#else
drm_gem_object_reference(&nv_gem->base);
#endif
}
static inline void
nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT)
drm_gem_object_put_unlocked(&nv_gem->base);
#else
drm_gem_object_put(&nv_gem->base);
#endif
#else
drm_gem_object_unreference_unlocked(&nv_gem->base);
#endif
}
static inline void
nv_drm_gem_object_unreference(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
drm_gem_object_put(&nv_gem->base);
#else
drm_gem_object_unreference(&nv_gem->base);
#endif
}
static inline int nv_drm_gem_handle_create_drop_reference(
@@ -169,17 +131,10 @@ done:
void nv_drm_gem_free(struct drm_gem_object *gem);
static inline struct nv_drm_gem_object *nv_drm_gem_object_lookup(
struct drm_device *dev,
struct drm_file *filp,
u32 handle)
{
#if (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 3)
return to_nv_gem_object(drm_gem_object_lookup(dev, filp, handle));
#elif (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 2)
return to_nv_gem_object(drm_gem_object_lookup(filp, handle));
#else
#error "Unknown argument count of drm_gem_object_lookup()"
#endif
}
static inline int nv_drm_gem_handle_create(struct drm_file *filp,
@@ -189,7 +144,6 @@ static inline int nv_drm_gem_handle_create(struct drm_file *filp,
return drm_gem_handle_create(filp, &nv_gem->base, handle);
}
#if defined(NV_DRM_FENCE_AVAILABLE)
static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem)
{
#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
@@ -198,7 +152,6 @@ static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem
return nv_gem->base.dma_buf ? nv_gem->base.dma_buf->resv : &nv_gem->resv;
#endif
}
#endif
void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
struct nv_drm_gem_object *nv_gem,


@@ -43,28 +43,7 @@
#include <drm/drm_atomic_uapi.h>
#endif
/*
* The inclusion of drm_framebuffer.h was removed from drm_crtc.h by commit
* 720cf96d8fecde29b72e1101f8a567a0ce99594f ("drm: Drop drm_framebuffer.h from
* drm_crtc.h") in linux-next, expected in v5.19-rc7.
*
* We only need drm_framebuffer.h for drm_framebuffer_put(), and it is always
* present (v4.9+) when drm_framebuffer_{put,get}() is present (v4.12+), so it
* is safe to unconditionally include it when drm_framebuffer_get() is present.
*/
#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
#include <drm/drm_framebuffer.h>
#endif
static void __nv_drm_framebuffer_put(struct drm_framebuffer *fb)
{
#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT)
drm_framebuffer_put(fb);
#else
drm_framebuffer_unreference(fb);
#endif
}
/*
* drm_atomic_helper_disable_all() has been added by commit
@@ -150,7 +129,6 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
goto free;
}
#if defined(NV_DRM_ROTATION_AVAILABLE)
nv_drm_for_each_plane(plane, dev) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
@@ -160,7 +138,6 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
plane_state->rotation = DRM_MODE_ROTATE_0;
}
#endif
nv_drm_for_each_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
@@ -190,28 +167,14 @@ free:
WARN_ON(plane->state->crtc);
if (plane->old_fb)
__nv_drm_framebuffer_put(plane->old_fb);
drm_framebuffer_put(plane->old_fb);
}
plane->old_fb = NULL;
}
}
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
drm_atomic_state_put(state);
#else
if (ret != 0) {
drm_atomic_state_free(state);
} else {
/*
* In case of success, drm_atomic_commit() takes care to cleanup and
* free @state.
*
* Comment placed above drm_atomic_commit() says: The caller must not
* free or in any other way access @state. If the function fails then
* the caller must clean up @state itself.
*/
}
#endif
return ret;
}


@@ -31,62 +31,26 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DRV_H_PRESENT)
#include <drm/drm_drv.h>
#endif
#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) || defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_ROTATE_* , DRM_REFLECT_* */
#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE)
#include <drm/drm_blend.h>
#endif
#if defined(NV_DRM_ROTATION_AVAILABLE)
/* For DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* */
/*
* For DRM_MODE_ROTATE_*, DRM_MODE_REFLECT_*, struct drm_color_ctm_3x4, and
* struct drm_color_lut.
*/
#include <uapi/drm/drm_mode.h>
#endif
#if defined(NV_DRM_ROTATION_AVAILABLE)
/*
* 19-05-2017 c2c446ad29437bb92b157423c632286608ebd3ec has added
* DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* to UAPI and removed
* DRM_ROTATE_* and DRM_REFLECT_*
*/
#if !defined(DRM_MODE_ROTATE_0)
#define DRM_MODE_ROTATE_0 DRM_ROTATE_0
#define DRM_MODE_ROTATE_90 DRM_ROTATE_90
#define DRM_MODE_ROTATE_180 DRM_ROTATE_180
#define DRM_MODE_ROTATE_270 DRM_ROTATE_270
#define DRM_MODE_REFLECT_X DRM_REFLECT_X
#define DRM_MODE_REFLECT_Y DRM_REFLECT_Y
#define DRM_MODE_ROTATE_MASK DRM_ROTATE_MASK
#define DRM_MODE_REFLECT_MASK DRM_REFLECT_MASK
#endif
#endif //NV_DRM_ROTATION_AVAILABLE
/*
* drm_dev_put() is added by commit 9a96f55034e41b4e002b767e9218d55f03bdff7d
* (2017-09-26) and drm_dev_unref() is removed by
* ba1d345401476a5f7fbad622607c5a1f95e59b31 (2018-11-15).
*
* drm_dev_unref() has been added and drm_dev_free() removed by commit -
*
* 2014-01-29: 099d1c290e2ebc3b798961a6c177c3aef5f0b789
*/
static inline void nv_drm_dev_free(struct drm_device *dev)
{
#if defined(NV_DRM_DEV_PUT_PRESENT)
drm_dev_put(dev);
#elif defined(NV_DRM_DEV_UNREF_PRESENT)
drm_dev_unref(dev);
#else
drm_dev_free(dev);
#endif
}
/*
* Commit 1e13c5644c44 ("drm/drm_mode_object: increase max objects to
* accommodate new color props") in Linux v6.8 increased the per-object
* property limit from 24 to 64.
*/
#define NV_DRM_USE_EXTENDED_PROPERTIES (DRM_OBJECT_MAX_PROPERTY >= 64)
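/*
* i.e. NV_DRM_USE_EXTENDED_PROPERTIES evaluates to true on v6.8+ kernels,
* where DRM_OBJECT_MAX_PROPERTY is 64, and false on older kernels, where it
* is 24 and the extra color-management properties would not fit.
*/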
#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
#include <drm/drm_prime.h>
#endif
static inline struct sg_table*
nv_drm_prime_pages_to_sg(struct drm_device *dev,
@@ -154,18 +118,6 @@ nv_drm_prime_pages_to_sg(struct drm_device *dev,
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
#endif
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
drm_for_each_connector_iter(connector, conn_iter)
#elif defined(drm_for_each_connector)
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
drm_for_each_connector(connector, dev)
#else
#define nv_drm_for_each_connector(connector, conn_iter, dev) \
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); \
list_for_each_entry(connector, &(dev)->mode_config.connector_list, head)
#endif
#if defined(drm_for_each_encoder)
#define nv_drm_for_each_encoder(encoder, dev) \
drm_for_each_encoder(encoder, dev)
@@ -306,54 +258,38 @@ int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
for_each_plane_in_state(__state, plane, plane_state, __i)
#endif
static inline struct drm_connector *
nv_drm_connector_lookup(struct drm_device *dev, struct drm_file *filep,
uint32_t id)
{
#if !defined(NV_DRM_CONNECTOR_LOOKUP_PRESENT)
return drm_connector_find(dev, id);
#elif defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
return drm_connector_lookup(dev, filep, id);
#else
return drm_connector_lookup(dev, id);
#endif
}
/*
* for_each_new_plane_in_state() was added by kernel commit
* 581e49fe6b411f407102a7f2377648849e0fa37f which was Signed-off-by:
* Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* This commit also added the old_state and new_state pointers to
* __drm_planes_state. Because of this, the best that can be done on kernel
* versions without this macro is for_each_plane_in_state.
*/
/**
* nv_drm_for_each_new_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
* @new_plane_state: &struct drm_plane_state iteration cursor for the new state
* @__i: int iteration cursor, for macro-internal use
*
* This iterates over all planes in an atomic update, tracking only the new
* state. This is useful in enable functions, where we need the new state the
* hardware should be in when the atomic commit operation has completed.
*/
#if !defined(for_each_new_plane_in_state)
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
nv_drm_for_each_plane_in_state(__state, plane, new_plane_state, __i)
#else
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
for_each_new_plane_in_state(__state, plane, new_plane_state, __i)
#endif
static inline void nv_drm_connector_put(struct drm_connector *connector)
{
#if defined(NV_DRM_CONNECTOR_PUT_PRESENT)
drm_connector_put(connector);
#elif defined(NV_DRM_CONNECTOR_LOOKUP_PRESENT)
drm_connector_unreference(connector);
#endif
}
static inline struct drm_crtc *
nv_drm_crtc_find(struct drm_device *dev, struct drm_file *filep, uint32_t id)
{
#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
return drm_crtc_find(dev, filep, id);
#else
return drm_crtc_find(dev, id);
#endif
}
static inline struct drm_encoder *nv_drm_encoder_find(struct drm_device *dev,
uint32_t id)
{
#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG)
return drm_encoder_find(dev, NULL /* file_priv */, id);
#else
return drm_encoder_find(dev, id);
#endif
}
#if defined(NV_DRM_DRM_AUTH_H_PRESENT)
#include <drm/drm_auth.h>
#endif
#if defined(NV_DRM_DRM_FILE_H_PRESENT)
#include <drm/drm_file.h>
#endif
/*
* drm_file_get_master() added by commit 56f0729a510f ("drm: protect drm_master
@@ -378,10 +314,6 @@ static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep)
* Ville Syrjälä <ville.syrjala@linux.intel.com>
*
* drm_connector_for_each_possible_encoder() is copied from
* include/drm/drm_connector.h and modified to use nv_drm_encoder_find()
* instead of drm_encoder_find().
*
* drm_connector_for_each_possible_encoder() is copied from
* include/drm/drm_connector.h @
* 83aefbb887b59df0b3520965c3701e01deacfc52
* which has the following copyright and license information:
@@ -407,9 +339,7 @@ static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep)
* OF THIS SOFTWARE.
*/
#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT)
#include <drm/drm_connector.h>
#endif
/**
* nv_drm_connector_for_each_possible_encoder - iterate connector's possible
@@ -428,8 +358,9 @@ static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep)
for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
(connector)->encoder_ids[(__i)] != 0; (__i)++) \
for_each_if((encoder) = \
nv_drm_encoder_find((connector)->dev, \
(connector)->encoder_ids[(__i)]))
drm_encoder_find((connector)->dev, NULL, \
(connector)->encoder_ids[(__i)]))
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
{ \
@@ -484,80 +415,14 @@ nv_drm_connector_update_edid_property(struct drm_connector *connector,
#endif
}
#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT)
#include <drm/drm_connector.h>
static inline
void nv_drm_connector_list_iter_begin(struct drm_device *dev,
struct drm_connector_list_iter *iter)
{
#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
drm_connector_list_iter_begin(dev, iter);
#else
drm_connector_list_iter_get(dev, iter);
#endif
}
static inline
void nv_drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
{
#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT)
drm_connector_list_iter_end(iter);
#else
drm_connector_list_iter_put(iter);
#endif
}
#endif
/*
* The drm_format_num_planes() function was added by commit d0d110e09629 drm:
* Add drm_format_num_planes() utility function in v3.3 (2011-12-20). Prototype
* was moved from drm_crtc.h to drm_fourcc.h by commit ae4df11a0f53 (drm: Move
* format-related helpers to drm_fourcc.c) in v4.8 (2016-06-09).
* drm_format_num_planes() has been removed by commit 05c452c115bf (drm: Remove
* users of drm_format_num_planes) in v5.3 (2019-05-16).
*
* drm_format_info() is available only from v4.10 (2016-10-18), added by commit
* 84770cc24f3a (drm: Centralize format information).
*/
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
static inline int nv_drm_format_num_planes(uint32_t format)
{
#if defined(NV_DRM_FORMAT_NUM_PLANES_PRESENT)
return drm_format_num_planes(format);
#else
const struct drm_format_info *info = drm_format_info(format);
return info != NULL ? info->num_planes : 1;
#endif
}
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
/*
* DRM_FORMAT_MOD_LINEAR was also defined after the original modifier support
* was added to the kernel, as a more explicit alias of DRM_FORMAT_MOD_NONE
*/
#if !defined(DRM_FORMAT_MOD_VENDOR_NONE)
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#endif
#if !defined(DRM_FORMAT_MOD_LINEAR)
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
#endif
/*
* DRM_FORMAT_MOD_INVALID was defined after the original modifier support was
* added to the kernel, for use as a sentinel value.
*/
#if !defined(DRM_FORMAT_RESERVED)
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
#endif
#if !defined(DRM_FORMAT_MOD_INVALID)
#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
#endif
/*
* DRM_FORMAT_MOD_VENDOR_NVIDIA was previously called
* DRM_FORMAT_MOD_VNEDOR_NV.
@@ -580,11 +445,9 @@ static inline int nv_drm_format_num_planes(uint32_t format)
(((c) & 0x7) << 23)))
#endif
#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */
/*
* DRM_UNLOCKED was removed with linux-next commit 2798ffcc1d6a ("drm: Remove
* locking for legacy ioctls and DRM_UNLOCKED"), but it was previously made
* DRM_UNLOCKED was removed with commit 2798ffcc1d6a ("drm: Remove locking for
* legacy ioctls and DRM_UNLOCKED") in v6.8, but it was previously made
* implicit for all non-legacy DRM driver IOCTLs since Linux v4.10 commit
* fa5386459f06 "drm: Used DRM_LEGACY for all legacy functions" (Linux v4.4
* commit ea487835e887 "drm: Enforce unlocked ioctl operation for kms driver
@@ -596,74 +459,15 @@ static inline int nv_drm_format_num_planes(uint32_t format)
#endif
/*
* drm_vma_offset_exact_lookup_locked() were added
* by kernel commit 2225cfe46bcc which was Signed-off-by:
* Daniel Vetter <daniel.vetter@intel.com>
*
* drm_vma_offset_exact_lookup_locked() were copied from
* include/drm/drm_vma_manager.h @ 2225cfe46bcc
* which has the following copyright and license information:
*
* Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
* struct drm_color_ctm_3x4 was added by commit 6872a189be50 ("drm/amd/display:
* Add 3x4 CTM support for plane CTM") in v6.8. For backwards compatibility,
* define it when not present.
*/
#include <drm/drm_vma_manager.h>
/**
* nv_drm_vma_offset_exact_lookup_locked() - Look up node by exact address
* @mgr: Manager object
* @start: Start address (page-based, not byte-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
* It only returns the exact object with the given start address.
*
* RETURNS:
* Node at exact start address @start.
*/
static inline struct drm_vma_offset_node *
nv_drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
#if defined(NV_DRM_VMA_OFFSET_EXACT_LOOKUP_LOCKED_PRESENT)
return drm_vma_offset_exact_lookup_locked(mgr, start, pages);
#else
struct drm_vma_offset_node *node;
node = drm_vma_offset_lookup_locked(mgr, start, pages);
return (node && node->vm_node.start == start) ? node : NULL;
#if !defined(NV_DRM_COLOR_CTM_3X4_PRESENT)
struct drm_color_ctm_3x4 {
__u64 matrix[12];
};
#endif
}
static inline bool
nv_drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
struct file *filp)
{
#if defined(NV_DRM_VMA_NODE_IS_ALLOWED_HAS_TAG_ARG)
return drm_vma_node_is_allowed(node, filp->private_data);
#else
return drm_vma_node_is_allowed(node, filp);
#endif
}
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */

View File

@@ -51,6 +51,8 @@
#define DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE 0x14
#define DRM_NVIDIA_SEMSURF_FENCE_CREATE 0x15
#define DRM_NVIDIA_SEMSURF_FENCE_WAIT 0x16
#define DRM_NVIDIA_SEMSURF_FENCE_ATTACH 0x17
#define DRM_NVIDIA_GET_DRM_FILE_UNIQUE_ID 0x18
#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY \
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY), \
@@ -70,7 +72,7 @@
*
* 'warning: suggest parentheses around arithmetic in operand of |'
*/
#if defined(NV_LINUX)
#if defined(NV_LINUX) || defined(NV_BSD)
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED \
DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED)
#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED \
@@ -151,6 +153,16 @@
DRM_NVIDIA_SEMSURF_FENCE_WAIT), \
struct drm_nvidia_semsurf_fence_wait_params)
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_ATTACH \
DRM_IOW((DRM_COMMAND_BASE + \
DRM_NVIDIA_SEMSURF_FENCE_ATTACH), \
struct drm_nvidia_semsurf_fence_attach_params)
#define DRM_IOCTL_NVIDIA_GET_DRM_FILE_UNIQUE_ID \
DRM_IOWR((DRM_COMMAND_BASE + \
DRM_NVIDIA_GET_DRM_FILE_UNIQUE_ID), \
struct drm_nvidia_get_drm_file_unique_id_params)
struct drm_nvidia_gem_import_nvkms_memory_params {
uint64_t mem_size; /* IN */
@@ -170,13 +182,18 @@ struct drm_nvidia_gem_import_userspace_memory_params {
struct drm_nvidia_get_dev_info_params {
uint32_t gpu_id; /* OUT */
uint32_t mig_device; /* OUT */
uint32_t primary_index; /* OUT; the "card%d" value */
/* See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these */
uint32_t supports_alloc; /* OUT */
/* The generic_page_kind, page_kind_generation, and sector_layout
* fields are only valid if supports_alloc is true.
* See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these. */
uint32_t generic_page_kind; /* OUT */
uint32_t page_kind_generation; /* OUT */
uint32_t sector_layout; /* OUT */
uint32_t supports_sync_fd; /* OUT */
uint32_t supports_semsurf; /* OUT */
};
struct drm_nvidia_prime_fence_context_create_params {
@@ -289,13 +306,20 @@ struct drm_nvidia_get_connector_id_for_dpy_id_params {
uint32_t connectorId; /* OUT */
};
enum drm_nvidia_permissions_type {
NV_DRM_PERMISSIONS_TYPE_MODESET = 2,
NV_DRM_PERMISSIONS_TYPE_SUB_OWNER = 3
};
struct drm_nvidia_grant_permissions_params {
int32_t fd; /* IN */
uint32_t dpyId; /* IN */
uint32_t type; /* IN */
};
struct drm_nvidia_revoke_permissions_params {
uint32_t dpyId; /* IN */
uint32_t type; /* IN */
};
struct drm_nvidia_semsurf_fence_ctx_create_params {
@@ -351,4 +375,25 @@ struct drm_nvidia_semsurf_fence_wait_params {
* for the sync file */
};
struct drm_nvidia_semsurf_fence_attach_params {
uint32_t handle; /* IN GEM handle of buffer */
uint32_t fence_context_handle; /* IN GEM handle of fence context */
uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
* after which the fence will be signaled
* with its error status set to -ETIMEDOUT.
* Default timeout value is 5000ms */
uint32_t shared; /* IN If true, fence will reserve shared
* access to the buffer, otherwise it will
* reserve exclusive access */
uint64_t wait_value; /* IN Semaphore value to reach before signal */
};
struct drm_nvidia_get_drm_file_unique_id_params {
uint64_t id; /* OUT Unique ID of the DRM file */
};
#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,8 +21,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include "nvidia-drm-os-interface.h"
#include "nvidia-drm.h"
@@ -31,257 +29,18 @@
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
#include <linux/file.h>
#include <linux/sync_file.h>
#endif
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include "nv-mm.h"
MODULE_PARM_DESC(
modeset,
"Enable atomic kernel modesetting (1 = enable, 0 = disable (default))");
bool nv_drm_modeset_module_param = false;
module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
void *nv_drm_calloc(size_t nmemb, size_t size)
{
size_t total_size = nmemb * size;
//
// Check for overflow.
//
if ((nmemb != 0) && ((total_size / nmemb) != size))
{
return NULL;
}
return kzalloc(nmemb * size, GFP_KERNEL);
}
void nv_drm_free(void *ptr)
{
if (IS_ERR(ptr)) {
return;
}
kfree(ptr);
}
char *nv_drm_asprintf(const char *fmt, ...)
{
va_list ap;
char *p;
va_start(ap, fmt);
p = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
return p;
}
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
#elif defined(NVCPU_FAMILY_ARM)
#if defined(NVCPU_ARM)
#define WRITE_COMBINE_FLUSH() { dsb(); outer_sync(); }
#elif defined(NVCPU_AARCH64)
#define WRITE_COMBINE_FLUSH() mb()
#endif
#elif defined(NVCPU_PPC64LE)
#define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
#if defined(NV_DRM_FBDEV_AVAILABLE)
MODULE_PARM_DESC(
fbdev,
"Create a framebuffer device (1 = enable (default), 0 = disable)");
module_param_named(fbdev, nv_drm_fbdev_module_param, bool, 0400);
#endif
void nv_drm_write_combine_flush(void)
{
WRITE_COMBINE_FLUSH();
}
int nv_drm_lock_user_pages(unsigned long address,
unsigned long pages_count, struct page ***pages)
{
struct mm_struct *mm = current->mm;
struct page **user_pages;
int pages_pinned;
user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));
if (user_pages == NULL) {
return -ENOMEM;
}
nv_mmap_read_lock(mm);
pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
user_pages, NULL);
nv_mmap_read_unlock(mm);
if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
goto failed;
}
*pages = user_pages;
return 0;
failed:
if (pages_pinned > 0) {
int i;
for (i = 0; i < pages_pinned; i++) {
NV_UNPIN_USER_PAGE(user_pages[i]);
}
}
nv_drm_free(user_pages);
return (pages_pinned < 0) ? pages_pinned : -EINVAL;
}
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
{
unsigned long i;
for (i = 0; i < pages_count; i++) {
set_page_dirty_lock(pages[i]);
NV_UNPIN_USER_PAGE(pages[i]);
}
nv_drm_free(pages);
}
void *nv_drm_vmap(struct page **pages, unsigned long pages_count)
{
return vmap(pages, pages_count, VM_USERMAP, PAGE_KERNEL);
}
void nv_drm_vunmap(void *address)
{
vunmap(address);
}
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name)
{
worker->shutting_down = false;
if (nv_kthread_q_init(&worker->q, name)) {
return false;
}
spin_lock_init(&worker->lock);
return true;
}
void nv_drm_workthread_shutdown(nv_drm_workthread *worker)
{
unsigned long flags;
spin_lock_irqsave(&worker->lock, flags);
worker->shutting_down = true;
spin_unlock_irqrestore(&worker->lock, flags);
nv_kthread_q_stop(&worker->q);
}
void nv_drm_workthread_work_init(nv_drm_work *work,
void (*callback)(void *),
void *arg)
{
nv_kthread_q_item_init(work, callback, arg);
}
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&worker->lock, flags);
if (!worker->shutting_down) {
ret = nv_kthread_q_schedule_q_item(&worker->q, work);
}
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
void nv_drm_timer_setup(nv_drm_timer *timer, void (*callback)(nv_drm_timer *nv_drm_timer))
{
nv_timer_setup(timer, callback);
}
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native)
{
mod_timer(&timer->kernel_timer, timeout_native);
}
unsigned long nv_drm_timer_now(void)
{
return jiffies;
}
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
{
return jiffies + msecs_to_jiffies(relative_timeout_ms);
}
bool nv_drm_del_timer_sync(nv_drm_timer *timer)
{
if (del_timer_sync(&timer->kernel_timer)) {
return true;
} else {
return false;
}
}
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_SYNC_FILE_H_PRESENT)
struct sync_file *sync;
int fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
return fd;
}
/* sync_file_create() generates its own reference to the fence */
sync = sync_file_create(fence);
if (IS_ERR(sync)) {
put_unused_fd(fd);
return PTR_ERR(sync);
}
fd_install(fd, sync->file);
return fd;
#else /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
return -EINVAL;
#endif /* defined(NV_LINUX_SYNC_FILE_H_PRESENT) */
}
nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd)
{
#if defined(NV_SYNC_FILE_GET_FENCE_PRESENT)
return sync_file_get_fence(fd);
#else /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
return NULL;
#endif /* defined(NV_SYNC_FILE_GET_FENCE_PRESENT) */
}
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
void nv_drm_yield(void)
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
#endif /* NV_DRM_AVAILABLE */
/*************************************************************************

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015, 2025, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -34,14 +34,19 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_VBLANK_H_PRESENT)
#include <drm/drm_vblank.h>
#endif
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
#include <linux/nvhost.h>
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
#include <linux/host1x-next.h>
#endif
#include <linux/dma-fence.h>
struct nv_drm_atomic_state {
struct NvKmsKapiRequestedModeSetConfig config;
struct drm_atomic_state base;
@@ -146,6 +151,163 @@ static int __nv_drm_put_back_post_fence_fd(
return ret;
}
struct nv_drm_plane_fence_cb_data {
struct dma_fence_cb dma_fence_cb;
struct nv_drm_device *nv_dev;
NvU32 semaphore_index;
};
static void
__nv_drm_plane_fence_cb(
struct dma_fence *fence,
struct dma_fence_cb *cb_data
)
{
struct nv_drm_plane_fence_cb_data *fence_data =
container_of(cb_data, typeof(*fence_data), dma_fence_cb);
struct nv_drm_device *nv_dev = fence_data->nv_dev;
dma_fence_put(fence);
nvKms->signalDisplaySemaphore(nv_dev->pDevice, fence_data->semaphore_index);
nv_drm_free(fence_data);
}
static int __nv_drm_convert_in_fences(
struct nv_drm_device *nv_dev,
struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct drm_plane *plane = NULL;
struct drm_plane_state *plane_state = NULL;
struct nv_drm_plane *nv_plane = NULL;
struct NvKmsKapiLayerRequestedConfig *plane_req_config = NULL;
struct NvKmsKapiHeadRequestedConfig *head_req_config =
&to_nv_crtc_state(crtc_state)->req_config;
struct nv_drm_plane_fence_cb_data *fence_data;
uint32_t semaphore_index;
uint32_t idx_count;
int ret, i;
if (!crtc_state->active) {
return 0;
}
nv_drm_for_each_new_plane_in_state(state, plane, plane_state, i) {
if ((plane->type == DRM_PLANE_TYPE_CURSOR) ||
(plane_state->crtc != crtc) ||
(plane_state->fence == NULL)) {
continue;
}
nv_plane = to_nv_plane(plane);
plane_req_config =
&head_req_config->layerRequestedConfig[nv_plane->layer_idx];
if (nv_dev->supportsSyncpts) {
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
#if defined(NV_NVHOST_DMA_FENCE_UNPACK_PRESENT)
int ret =
nvhost_dma_fence_unpack(
plane_state->fence,
&plane_req_config->config.syncParams.u.syncpt.preSyncptId,
&plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
if (ret == 0) {
plane_req_config->config.syncParams.preSyncptSpecified = true;
continue;
}
#endif
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
int ret =
host1x_fence_extract(
plane_state->fence,
&plane_req_config->config.syncParams.u.syncpt.preSyncptId,
&plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
if (ret == 0) {
plane_req_config->config.syncParams.preSyncptSpecified = true;
continue;
}
#endif
}
/*
* Syncpt extraction failed, or syncpts are not supported.
* Use general DRM fence support with semaphores instead.
*/
if (plane_req_config->config.syncParams.postSyncptRequested) {
// Can't mix Syncpts and semaphores in a given request.
return -EINVAL;
}
for (idx_count = 0; idx_count < nv_dev->display_semaphores.count; idx_count++) {
semaphore_index = nv_drm_next_display_semaphore(nv_dev);
if (nvKms->tryInitDisplaySemaphore(nv_dev->pDevice, semaphore_index)) {
break;
}
}
if (idx_count == nv_dev->display_semaphores.count) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to initialize semaphore for plane fence");
/*
* This should only happen if the semaphore pool was somehow
* exhausted. Waiting a bit and retrying may help in that case.
*/
return -EAGAIN;
}
plane_req_config->config.syncParams.semaphoreSpecified = true;
plane_req_config->config.syncParams.u.semaphore.index = semaphore_index;
fence_data = nv_drm_calloc(1, sizeof(*fence_data));
if (!fence_data) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed to allocate callback data for plane fence");
nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
return -ENOMEM;
}
fence_data->nv_dev = nv_dev;
fence_data->semaphore_index = semaphore_index;
ret = dma_fence_add_callback(plane_state->fence,
&fence_data->dma_fence_cb,
__nv_drm_plane_fence_cb);
switch (ret) {
case -ENOENT:
/* The fence is already signaled */
__nv_drm_plane_fence_cb(plane_state->fence,
&fence_data->dma_fence_cb);
#if defined(fallthrough)
fallthrough;
#else
/* Fallthrough */
#endif
case 0:
/*
* The plane state's fence reference has either been consumed or
* belongs to the outstanding callback now.
*/
plane_state->fence = NULL;
break;
default:
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Failed plane fence callback registration");
/* Fence callback registration failed */
nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
nv_drm_free(fence_data);
return ret;
}
}
return 0;
}
static int __nv_drm_get_syncpt_data(
struct nv_drm_device *nv_dev,
struct drm_crtc *crtc,
@@ -237,6 +399,33 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
int i;
int ret;
/*
* If sub-owner permission was granted to another NVKMS client, disallow
* modesets through the DRM interface.
*/
if (nv_dev->subOwnershipGranted) {
return -EINVAL;
}
if (commit) {
/*
* This function does what is necessary to prepare the framebuffers
* attached to each new plane in the state for scan out, mostly by
* calling back into driver callbacks the NVIDIA driver does not
* provide. The end result is that all it does on the NVIDIA driver
* is populate the plane state's dma fence pointers with any implicit
* sync fences attached to the GEM objects associated with those planes
* in the new state, prefering explicit sync fences when appropriate.
* This must be done prior to converting the per-plane fences to
* semaphore waits below.
*/
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret) {
return ret;
}
}
memset(requested_config, 0, sizeof(*requested_config));
/* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
@@ -250,11 +439,6 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
commit ? crtc->state : crtc_state;
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
requested_config->headRequestedConfig[nv_crtc->head] =
to_nv_crtc_state(new_crtc_state)->req_config;
requested_config->headsMask |= 1 << nv_crtc->head;
if (commit) {
struct drm_crtc_state *old_crtc_state = crtc_state;
struct nv_drm_crtc_state *nv_new_crtc_state =
@@ -274,10 +458,25 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
nv_new_crtc_state->nv_flip = NULL;
}
#if defined(NV_DRM_CRTC_STATE_HAS_VRR_ENABLED)
requested_config->headRequestedConfig[nv_crtc->head].modeSetConfig.vrrEnabled = new_crtc_state->vrr_enabled;
#endif
ret = __nv_drm_convert_in_fences(nv_dev,
state,
crtc,
new_crtc_state);
if (ret != 0) {
return ret;
}
}
/*
* Do this deep copy after calling __nv_drm_convert_in_fences,
* which modifies the new CRTC state's req_config member
*/
requested_config->headRequestedConfig[nv_crtc->head] =
to_nv_crtc_state(new_crtc_state)->req_config;
requested_config->headsMask |= 1 << nv_crtc->head;
}
if (commit && nvKms->systemInfo.bAllowWriteCombining) {
@@ -292,7 +491,9 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
requested_config,
&reply_config,
commit)) {
return -EINVAL;
if (commit || reply_config.flipResult != NV_KMS_FLIP_RESULT_IN_PROGRESS) {
return -EINVAL;
}
}
if (commit && nv_dev->supportsSyncpts) {
@@ -306,6 +507,10 @@ nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
}
}
if (commit && nv_dev->requiresVrrSemaphores && reply_config.vrrFlip) {
nvKms->signalVrrSemaphore(nv_dev->pDevice, reply_config.vrrSemaphoreIndex);
}
return 0;
}
@@ -314,6 +519,48 @@ int nv_drm_atomic_check(struct drm_device *dev,
{
int ret = 0;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
int j;
bool cursor_surface_changed;
bool cursor_only_commit;
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*
* Committing cursor surface change without any other plane change can
* cause cursor surface in use by HW to be freed prematurely. Add all
* planes to the commit to avoid this. This is a workaround for bug 4966645.
*/
cursor_surface_changed = false;
cursor_only_commit = true;
nv_drm_for_each_plane_in_state(crtc_state->state, plane, plane_state, j) {
if (plane->type == DRM_PLANE_TYPE_CURSOR) {
if (plane_state->fb != plane->state->fb) {
cursor_surface_changed = true;
}
} else {
cursor_only_commit = false;
break;
}
}
/*
* if the color management changed on the crtc, we need to update the
* crtc's plane's CSC matrices, so add the crtc's planes to the commit
*/
if (crtc_state->color_mgmt_changed ||
(cursor_surface_changed && cursor_only_commit)) {
if ((ret = drm_atomic_add_affected_planes(state, crtc)) != 0) {
goto done;
}
}
}
if ((ret = drm_atomic_helper_check(dev, state)) != 0) {
goto done;
}
@@ -388,47 +635,84 @@ int nv_drm_atomic_commit(struct drm_device *dev,
struct nv_drm_device *nv_dev = to_nv_device(dev);
/*
* drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY
* for nonblocking commit if previous updates (commit tasks/flip event) are
* pending. In case of blocking commits it mandates to wait for previous
* updates to complete.
* XXX: drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY
* for nonblocking commit if the commit would need to wait for previous
* updates (commit tasks/flip event) to complete. In case of blocking
* commits it mandates to wait for previous updates to complete. However,
* the kernel DRM-KMS documentation does explicitly allow maintaining a
* queue of outstanding commits.
*
* Our system already implements such a queue, but due to
* bug 4054608, it is currently not used.
*/
if (nonblock) {
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
/*
* Here you aren't required to hold nv_drm_crtc::flip_list_lock
* because:
*
* The core DRM driver acquires lock for all affected crtcs before
* calling into ->commit() hook, therefore it is not possible for
* other threads to call into ->commit() hook affecting same crtcs
* and enqueue flip objects into flip_list -
*
* nv_drm_atomic_commit_internal()
* |-> nv_drm_atomic_apply_modeset_config(commit=true)
* |-> nv_drm_crtc_enqueue_flip()
*
* Only possibility is list_empty check races with code path
* dequeuing flip object -
*
* __nv_drm_handle_flip_event()
* |-> nv_drm_crtc_dequeue_flip()
*
* But this race condition can't lead list_empty() to return
* incorrect result. nv_drm_crtc_dequeue_flip() in the middle of
* updating the list could not trick us into thinking the list is
* empty when it isn't.
*/
/*
* Here you aren't required to hold nv_drm_crtc::flip_list_lock
* because:
*
* The core DRM driver acquires lock for all affected crtcs before
* calling into ->commit() hook, therefore it is not possible for
* other threads to call into ->commit() hook affecting same crtcs
* and enqueue flip objects into flip_list -
*
* nv_drm_atomic_commit_internal()
* |-> nv_drm_atomic_apply_modeset_config(commit=true)
* |-> nv_drm_crtc_enqueue_flip()
*
* Only possibility is list_empty check races with code path
* dequeuing flip object -
*
* __nv_drm_handle_flip_event()
* |-> nv_drm_crtc_dequeue_flip()
*
* But this race condition can't lead list_empty() to return
* incorrect result. nv_drm_crtc_dequeue_flip() in the middle of
* updating the list could not trick us into thinking the list is
* empty when it isn't.
*/
if (nonblock) {
if (!list_empty(&nv_crtc->flip_list)) {
return -EBUSY;
}
} else {
if (wait_event_timeout(
nv_dev->flip_event_wq,
list_empty(&nv_crtc->flip_list),
3 * HZ /* 3 second */) == 0) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Flip event timeout on head %u", nv_crtc->head);
}
}
/*
* If the legacy LUT needs to be updated, ensure that the previous LUT
* update is complete first.
*/
if (crtc_state->color_mgmt_changed) {
NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
nv_crtc->head,
!nonblock /* waitForCompletion */);
/* If checking the LUT notifier failed, assume no LUT notifier is set. */
if (!complete) {
if (nonblock) {
return -EBUSY;
} else {
/*
* checkLutNotifier should wait on the notifier in this
* case, so we should only get here if the wait timed out.
*/
NV_DRM_DEV_LOG_ERR(
nv_dev,
"LUT notifier timeout on head %u", nv_crtc->head);
}
}
}
}
#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG)
/*
* nv_drm_atomic_commit_internal()
* implements blocking/non-blocking atomic commit using
@@ -439,18 +723,10 @@ int nv_drm_atomic_commit(struct drm_device *dev,
* expected.
*/
#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_RETURN_INT)
ret = drm_atomic_helper_swap_state(state, false /* stall */);
if (WARN_ON(ret != 0)) {
return ret;
}
#else
drm_atomic_helper_swap_state(state, false /* stall */);
#endif
#else
drm_atomic_helper_swap_state(dev, state);
#endif
/*
* nv_drm_atomic_commit_internal() must not return failure after
@@ -547,20 +823,29 @@ int nv_drm_atomic_commit(struct drm_device *dev,
NV_DRM_DEV_LOG_ERR(
nv_dev,
"Flip event timeout on head %u", nv_crtc->head);
while (!list_empty(&nv_crtc->flip_list)) {
__nv_drm_handle_flip_event(nv_crtc);
}
}
if (crtc_state->color_mgmt_changed) {
NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
nv_crtc->head,
true /* waitForCompletion */);
if (!complete) {
NV_DRM_DEV_LOG_ERR(
nv_dev,
"LUT notifier timeout on head %u", nv_crtc->head);
}
}
}
}
done:
#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT)
/*
* If ref counting is present, state will be freed when the caller
* drops its reference after we return.
* State will be freed when the caller drops its reference after we return.
*/
#else
drm_atomic_state_free(state);
#endif
return 0;
}

View File

@@ -0,0 +1,257 @@
/*
* Copyright (c) 2015-2025, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/slab.h>
#include "nvidia-drm-os-interface.h"
#if defined(NV_DRM_AVAILABLE)
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/device.h>
#include "nv-mm.h"
#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif
bool nv_drm_modeset_module_param = false;
bool nv_drm_fbdev_module_param = true;
void *nv_drm_calloc(size_t nmemb, size_t size)
{
size_t total_size = nmemb * size;
//
// Check for overflow.
//
if ((nmemb != 0) && ((total_size / nmemb) != size))
{
return NULL;
}
return kzalloc(nmemb * size, GFP_KERNEL);
}
void nv_drm_free(void *ptr)
{
if (IS_ERR(ptr)) {
return;
}
kfree(ptr);
}
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
#elif defined(NVCPU_PPC64LE)
#define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
#else
#define WRITE_COMBINE_FLUSH() mb()
#endif
void nv_drm_write_combine_flush(void)
{
WRITE_COMBINE_FLUSH();
}
int nv_drm_lock_user_pages(unsigned long address,
unsigned long pages_count, struct page ***pages)
{
struct mm_struct *mm = current->mm;
struct page **user_pages;
int pages_pinned;
user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));
if (user_pages == NULL) {
return -ENOMEM;
}
nv_mmap_read_lock(mm);
pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
user_pages);
nv_mmap_read_unlock(mm);
if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
goto failed;
}
*pages = user_pages;
return 0;
failed:
if (pages_pinned > 0) {
int i;
for (i = 0; i < pages_pinned; i++) {
NV_UNPIN_USER_PAGE(user_pages[i]);
}
}
nv_drm_free(user_pages);
return (pages_pinned < 0) ? pages_pinned : -EINVAL;
}
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
{
unsigned long i;
for (i = 0; i < pages_count; i++) {
set_page_dirty_lock(pages[i]);
NV_UNPIN_USER_PAGE(pages[i]);
}
nv_drm_free(pages);
}
/*
* linuxkpi vmap doesn't use the flags argument as it
* doesn't seem to be needed. Define VM_USERMAP to 0
* to make errors go away
*
* vmap: sys/compat/linuxkpi/common/src/linux_compat.c
*/
#if defined(NV_BSD)
#define VM_USERMAP 0
#endif
void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached)
{
pgprot_t prot = PAGE_KERNEL;
if (!cached) {
prot = pgprot_noncached(PAGE_KERNEL);
}
return vmap(pages, pages_count, VM_USERMAP, prot);
}
void nv_drm_vunmap(void *address)
{
vunmap(address);
}
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name)
{
worker->shutting_down = false;
if (nv_kthread_q_init(&worker->q, name)) {
return false;
}
spin_lock_init(&worker->lock);
return true;
}
void nv_drm_workthread_shutdown(nv_drm_workthread *worker)
{
unsigned long flags;
spin_lock_irqsave(&worker->lock, flags);
worker->shutting_down = true;
spin_unlock_irqrestore(&worker->lock, flags);
nv_kthread_q_stop(&worker->q);
}
void nv_drm_workthread_work_init(nv_drm_work *work,
void (*callback)(void *),
void *arg)
{
nv_kthread_q_item_init(work, callback, arg);
}
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&worker->lock, flags);
if (!worker->shutting_down) {
ret = nv_kthread_q_schedule_q_item(&worker->q, work);
}
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
void nv_drm_timer_setup(nv_drm_timer *timer, void (*callback)(nv_drm_timer *nv_drm_timer))
{
nv_timer_setup(timer, callback);
}
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native)
{
mod_timer(&timer->kernel_timer, timeout_native);
}
unsigned long nv_drm_timer_now(void)
{
return jiffies;
}
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
{
return jiffies + msecs_to_jiffies(relative_timeout_ms);
}
int nv_drm_create_sync_file(struct dma_fence *fence)
{
struct sync_file *sync;
int fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
return fd;
}
/* sync_file_create() generates its own reference to the fence */
sync = sync_file_create(fence);
if (IS_ERR(sync)) {
put_unused_fd(fd);
return PTR_ERR(sync);
}
fd_install(fd, sync->file);
return fd;
}
struct dma_fence *nv_drm_sync_file_get_fence(int fd)
{
return sync_file_get_fence(fd);
}
void nv_drm_yield(void)
{
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
#endif /* NV_DRM_AVAILABLE */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2025, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -29,11 +29,9 @@
#if defined(NV_DRM_AVAILABLE)
#if defined(NV_DRM_FENCE_AVAILABLE)
#include "nvidia-dma-fence-helper.h"
#endif
#include "linux/dma-fence.h"
#if defined(NV_LINUX)
#if defined(NV_LINUX) || defined(NV_BSD)
#include "nv-kthread-q.h"
#include "linux/spinlock.h"
@@ -45,30 +43,32 @@ typedef struct nv_drm_workthread {
typedef nv_kthread_q_item_t nv_drm_work;
#else /* defined(NV_LINUX) */
#else
#error "Need to define deferred work primitives for this OS"
#endif /* else defined(NV_LINUX) */
#endif
#if defined(NV_LINUX)
#if defined(NV_LINUX) || defined(NV_BSD)
#include "nv-timer.h"
typedef struct nv_timer nv_drm_timer;
#else /* defined(NV_LINUX) */
#else
#error "Need to define kernel timer callback primitives for this OS"
#endif /* else defined(NV_LINUX) */
#endif
struct page;
/* Set to true when the atomic modeset feature is enabled. */
extern bool nv_drm_modeset_module_param;
#if defined(NV_DRM_FBDEV_AVAILABLE)
/* Set to true when the nvidia-drm driver should install a framebuffer device */
extern bool nv_drm_fbdev_module_param;
#endif
void *nv_drm_calloc(size_t nmemb, size_t size);
void nv_drm_free(void *ptr);
char *nv_drm_asprintf(const char *fmt, ...);
void nv_drm_write_combine_flush(void);
int nv_drm_lock_user_pages(unsigned long address,
@@ -76,7 +76,7 @@ int nv_drm_lock_user_pages(unsigned long address,
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages);
void *nv_drm_vmap(struct page **pages, unsigned long pages_count);
void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached);
void nv_drm_vunmap(void *address);
@@ -97,17 +97,13 @@ void nv_drm_timer_setup(nv_drm_timer *timer,
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long relative_timeout_ms);
bool nv_drm_del_timer_sync(nv_drm_timer *timer);
unsigned long nv_drm_timer_now(void);
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);
#if defined(NV_DRM_FENCE_AVAILABLE)
int nv_drm_create_sync_file(nv_dma_fence_t *fence);
int nv_drm_create_sync_file(struct dma_fence *fence);
nv_dma_fence_t *nv_drm_sync_file_get_fence(int fd);
#endif /* defined(NV_DRM_FENCE_AVAILABLE) */
struct dma_fence *nv_drm_sync_file_get_fence(int fd);
void nv_drm_yield(void);

View File

@@ -31,13 +31,8 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
#include <drm/drm_device.h>
#endif
#if defined(NV_DRM_DRM_GEM_H_PRESENT)
#include <drm/drm_gem.h>
#endif
#include "nvidia-drm-os-interface.h"
@@ -85,8 +80,15 @@
DRM_DEBUG_DRIVER("[GPU ID 0x%08x] " __fmt, \
__dev->gpu_info.gpu_id, ##__VA_ARGS__)
enum nv_drm_input_color_space {
NV_DRM_INPUT_COLOR_SPACE_NONE,
NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR,
NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ
};
struct nv_drm_device {
nv_gpu_info_t gpu_info;
MIGDeviceId gpu_mig_device;
struct drm_device *dev;
@@ -122,10 +124,9 @@ struct nv_drm_device {
NvU8 genericPageKind;
NvU8 pageKindGeneration;
NvU8 sectorLayout;
#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];
#endif
struct delayed_work hotplug_event_work;
atomic_t enable_event_handling;
/**
@@ -138,25 +139,65 @@ struct nv_drm_device {
#endif
#if defined(NV_DRM_FENCE_AVAILABLE)
NvU64 semsurf_stride;
NvU64 semsurf_max_submitted_offset;
#endif
NvBool hasVideoMemory;
NvBool supportsSyncpts;
NvBool requiresVrrSemaphores;
NvBool subOwnershipGranted;
NvBool hasFramebufferConsole;
struct drm_property *nv_out_fence_property;
struct drm_property *nv_input_colorspace_property;
struct {
NvU32 count;
NvU32 next_index;
} display_semaphores;
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
struct drm_property *nv_hdr_output_metadata_property;
#endif
struct drm_property *nv_plane_lms_ctm_property;
struct drm_property *nv_plane_lms_to_itp_ctm_property;
struct drm_property *nv_plane_itp_to_lms_ctm_property;
struct drm_property *nv_plane_blend_ctm_property;
struct drm_property *nv_plane_degamma_tf_property;
struct drm_property *nv_plane_degamma_lut_property;
struct drm_property *nv_plane_degamma_lut_size_property;
struct drm_property *nv_plane_degamma_multiplier_property;
struct drm_property *nv_plane_tmo_lut_property;
struct drm_property *nv_plane_tmo_lut_size_property;
struct drm_property *nv_crtc_regamma_tf_property;
struct drm_property *nv_crtc_regamma_lut_property;
struct drm_property *nv_crtc_regamma_lut_size_property;
struct drm_property *nv_crtc_regamma_divisor_property;
struct nv_drm_device *next;
NvU64 vtFbBaseAddress;
NvU64 vtFbSize;
};
static inline NvU32 nv_drm_next_display_semaphore(
struct nv_drm_device *nv_dev)
{
NvU32 current_index = nv_dev->display_semaphores.next_index++;
if (nv_dev->display_semaphores.next_index >=
nv_dev->display_semaphores.count) {
nv_dev->display_semaphores.next_index = 0;
}
return current_index;
}
static inline struct nv_drm_device *to_nv_device(
struct drm_device *dev)
{

View File

@@ -0,0 +1,111 @@
###########################################################################
# Kbuild fragment for nvidia-drm.ko
###########################################################################
#
# Define NVIDIA_DRM_SOURCES
#
NVIDIA_DRM_SOURCES =
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-kthread-q.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-os-interface.c
#
# Register the conftests needed by nvidia-drm.ko
#
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_devices
NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_pci_devices
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_generic_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_ttm_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_client_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_plane_create_color_properties
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_mixed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_prime_mmap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_sysfs_connector_property_event
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_sysfs_connector_status_event
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_no_primary_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_syncobj_features_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_ctm_3x4_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_info_has_is_yuv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_mmap
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_date
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_helper_funcs_mode_valid_has_const_mode_arg

View File

@@ -28,10 +28,7 @@
#include <drm/drmP.h>
#endif
#if defined(NV_DRM_DRM_PLANE_H_PRESENT)
#include <drm/drm_plane.h>
#endif
#include <drm/drm_modes.h>
#include <uapi/drm/drm_fourcc.h>

View File

@@ -2,30 +2,16 @@
# Kbuild fragment for nvidia-drm.ko
###########################################################################
# Get our source file list and conftest list from the common file
include $(src)/nvidia-drm/nvidia-drm-sources.mk
# Linux-specific sources
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
#
# Define NVIDIA_DRM_{SOURCES,OBJECTS}
#
NVIDIA_DRM_SOURCES =
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-kthread-q.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c
NVIDIA_DRM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_DRM_SOURCES))
obj-m += nvidia-drm.o
@@ -44,99 +30,4 @@ NVIDIA_DRM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_DRM_OBJECTS), $(NVIDIA_DRM_CFLAGS))
#
# Register the conftests needed by nvidia-drm.ko
#
NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_DRM_OBJECTS)
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_fence_set_error
NV_CONFTEST_FUNCTION_COMPILE_TESTS += sync_file_get_fence
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_has_leases
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_lookup
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_put
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present

View File

@@ -45,6 +45,7 @@ int nv_drm_init(void)
return -EINVAL;
}
nvKms->setSuspendResumeCallback(nv_drm_suspend_resume);
return nv_drm_probe_devices();
#else
return 0;
@@ -54,6 +55,7 @@ int nv_drm_init(void)
void nv_drm_exit(void)
{
#if defined(NV_DRM_AVAILABLE)
nvKms->setSuspendResumeCallback(NULL);
nv_drm_remove_devices();
#endif
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,12 +29,7 @@
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#if defined(NV_LINUX_BUG_H_PRESENT)
#include <linux/bug.h>
#else
#include <asm/bug.h>
#endif
#include <linux/bug.h>
// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
@@ -176,7 +171,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
{
unsigned i, j;
const static unsigned attempts = 3;
static const unsigned attempts = 3;
struct task_struct *thread[3];
for (i = 0;; i++) {
@@ -201,7 +196,7 @@ static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
// Ran out of attempts - return thread even if its stack may not be
// allocated on the preferred node
if ((i == (attempts - 1)))
if (i == (attempts - 1))
break;
// Get the NUMA node where the first page of the stack is resident. If
@@ -247,6 +242,11 @@ int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferr
return 0;
}
int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -35,12 +35,13 @@
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/freezer.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <acpi/video.h>
#include "nvstatus.h"
#include "nv-register-module.h"
#include "nv-modeset-interface.h"
#include "nv-kref.h"
@@ -52,9 +53,15 @@
#include "nv-procfs.h"
#include "nv-kthread-q.h"
#include "nv-time.h"
#include "nv-timer.h"
#include "nv-lock.h"
#include "nv-chardev-numbers.h"
#if !defined(CONFIG_RETPOLINE)
/*
* Commit aefb2f2e619b ("x86/bugs: Rename CONFIG_RETPOLINE =>
* CONFIG_MITIGATION_RETPOLINE) in v6.8 renamed CONFIG_RETPOLINE.
*/
#if !defined(CONFIG_RETPOLINE) && !defined(CONFIG_MITIGATION_RETPOLINE)
#include "nv-retpoline.h"
#endif
@@ -68,6 +75,24 @@ module_param_named(output_rounding_fix, output_rounding_fix, bool, 0400);
static bool disable_hdmi_frl = false;
module_param_named(disable_hdmi_frl, disable_hdmi_frl, bool, 0400);
static bool disable_vrr_memclk_switch = false;
module_param_named(disable_vrr_memclk_switch, disable_vrr_memclk_switch, bool, 0400);
static bool hdmi_deepcolor = true;
module_param_named(hdmi_deepcolor, hdmi_deepcolor, bool, 0400);
static bool vblank_sem_control = true;
module_param_named(vblank_sem_control, vblank_sem_control, bool, 0400);
static bool opportunistic_display_sync = true;
module_param_named(opportunistic_display_sync, opportunistic_display_sync, bool, 0400);
static enum NvKmsDebugForceColorSpace debug_force_color_space = NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE;
module_param_named(debug_force_color_space, debug_force_color_space, uint, 0400);
static bool enable_overlay_layers = true;
module_param_named(enable_overlay_layers, enable_overlay_layers, bool, 0400);
/* These parameters are used for fault injection tests. Normally the defaults
* should be used. */
MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc");
@@ -78,17 +103,50 @@ MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on modul
static bool malloc_verbose = false;
module_param_named(malloc_verbose, malloc_verbose, bool, 0400);
MODULE_PARM_DESC(conceal_vrr_caps,
"Conceal all display VRR capabilities");
static bool conceal_vrr_caps = false;
module_param_named(conceal_vrr_caps, conceal_vrr_caps, bool, 0400);
/* Fail allocating the RM core channel for NVKMS using the i-th method (see
* FailAllocCoreChannelMethod). Failures not using the i-th method are ignored. */
MODULE_PARM_DESC(fail_alloc_core_channel, "Control testing for hardware core channel allocation failure");
static int fail_alloc_core_channel_method = -1;
module_param_named(fail_alloc_core_channel, fail_alloc_core_channel_method, int, 0400);
#if NVKMS_CONFIG_FILE_SUPPORTED
/* This parameter is used to find the dpy override conf file */
#define NVKMS_CONF_FILE_SPECIFIED (nvkms_conf != NULL)
MODULE_PARM_DESC(config_file,
"Path to the nvidia-modeset configuration file "
"(default: disabled)");
"Path to the nvidia-modeset configuration file (default: disabled)");
static char *nvkms_conf = NULL;
module_param_named(config_file, nvkms_conf, charp, 0400);
#endif
static atomic_t nvkms_alloc_called_count;
NvBool nvkms_test_fail_alloc_core_channel(
enum FailAllocCoreChannelMethod method
)
{
if (method != fail_alloc_core_channel_method) {
// don't fail if it's not the currently specified method
return NV_FALSE;
}
printk(KERN_INFO NVKMS_LOG_PREFIX
"Failing core channel allocation using method %d",
fail_alloc_core_channel_method);
return NV_TRUE;
}
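For context, a hedged sketch of how a core-channel setup path might consult this fault-injection hook; the real call sites are in the core NVKMS sources, which are not part of this diff:

/* Hypothetical call site: bail out of core channel setup when the
 * matching fault-injection method is selected via the module param. */
if (nvkms_test_fail_alloc_core_channel(FAIL_ALLOC_CORE_CHANNEL_RM_SETUP_CORE_CHANNEL)) {
    return NV_FALSE; /* simulated allocation failure */
}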
NvBool nvkms_conceal_vrr_caps(void)
{
return conceal_vrr_caps;
}
NvBool nvkms_output_rounding_fix(void)
{
return output_rounding_fix;
@@ -99,6 +157,39 @@ NvBool nvkms_disable_hdmi_frl(void)
return disable_hdmi_frl;
}
NvBool nvkms_disable_vrr_memclk_switch(void)
{
return disable_vrr_memclk_switch;
}
NvBool nvkms_hdmi_deepcolor(void)
{
return hdmi_deepcolor;
}
NvBool nvkms_vblank_sem_control(void)
{
return vblank_sem_control;
}
NvBool nvkms_opportunistic_display_sync(void)
{
return opportunistic_display_sync;
}
enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void)
{
if (debug_force_color_space >= NVKMS_DEBUG_FORCE_COLOR_SPACE_MAX) {
return NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE;
}
return debug_force_color_space;
}
NvBool nvkms_enable_overlay_layers(void)
{
return enable_overlay_layers;
}
NvBool nvkms_kernel_supports_syncpts(void)
{
/*
@@ -433,9 +524,23 @@ static inline int nvkms_read_trylock_pm_lock(void)
static inline void nvkms_read_lock_pm_lock(void)
{
while (!down_read_trylock(&nvkms_pm_lock)) {
try_to_freeze();
cond_resched();
if ((current->flags & PF_NOFREEZE)) {
/*
* Non-freezable tasks (i.e. kthreads in this case) don't have to worry
* about being frozen during system suspend, but do need to block so
* that the CPU can go idle during s2idle. Do a normal uninterruptible
* blocking wait for the PM lock.
*/
down_read(&nvkms_pm_lock);
} else {
/*
* For freezable tasks, make sure we give the kernel an opportunity to
* freeze if taking the PM lock fails.
*/
while (!down_read_trylock(&nvkms_pm_lock)) {
try_to_freeze();
cond_resched();
}
}
}
@@ -682,6 +787,8 @@ nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel,
static void nvkms_suspend(NvU32 gpuId)
{
nvKmsKapiSuspendResume(NV_TRUE /* suspend */);
if (gpuId == 0) {
nvkms_write_lock_pm_lock();
}
@@ -700,6 +807,8 @@ static void nvkms_resume(NvU32 gpuId)
if (gpuId == 0) {
nvkms_write_unlock_pm_lock();
}
nvKmsKapiSuspendResume(NV_FALSE /* suspend */);
}
@@ -859,7 +968,7 @@ static void nvkms_kthread_q_callback(void *arg)
* pending timers and then waiting for workqueue callbacks.
*/
if (timer->kernel_timer_created) {
del_timer_sync(&timer->kernel_timer);
nv_timer_delete_sync(&timer->kernel_timer);
}
/*
@@ -929,12 +1038,6 @@ inline static void nvkms_timer_callback_typed_data(struct timer_list *timer)
_nvkms_timer_callback_internal(nvkms_timer);
}
inline static void nvkms_timer_callback_anon_data(unsigned long arg)
{
struct nvkms_timer_t *nvkms_timer = (struct nvkms_timer_t *) arg;
_nvkms_timer_callback_internal(nvkms_timer);
}
static void
nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_timer_proc_t *proc,
void *dataPtr, NvU32 dataU32, NvBool isRefPtr, NvU64 usec)
@@ -967,13 +1070,7 @@ nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_timer_proc_t *proc,
timer->kernel_timer_created = NV_FALSE;
nvkms_queue_work(&nvkms_kthread_q, &timer->nv_kthread_q_item);
} else {
#if defined(NV_TIMER_SETUP_PRESENT)
timer_setup(&timer->kernel_timer, nvkms_timer_callback_typed_data, 0);
#else
init_timer(&timer->kernel_timer);
timer->kernel_timer.function = nvkms_timer_callback_anon_data;
timer->kernel_timer.data = (unsigned long) timer;
#endif
timer->kernel_timer_created = NV_TRUE;
mod_timer(&timer->kernel_timer, jiffies + NVKMS_USECS_TO_JIFFIES(usec));
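The removed fallback predates timer_setup(), which has been in mainline since v4.14; as a minimal reminder of the pattern this file now assumes (names below are illustrative, not from this diff):

#include <linux/timer.h>

struct my_ctx {
    struct timer_list timer;
    int fired;
};

/* timer_setup() callbacks receive the timer_list pointer; the
 * containing object is recovered with from_timer(). */
static void my_timer_cb(struct timer_list *t)
{
    struct my_ctx *ctx = from_timer(ctx, t, timer);
    ctx->fired = 1;
}

/* Arm it with: timer_setup(&ctx->timer, my_timer_cb, 0);
 *              mod_timer(&ctx->timer, jiffies + HZ); */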
@@ -1028,49 +1125,6 @@ void nvkms_free_timer(nvkms_timer_handle_t *handle)
timer->cancel = NV_TRUE;
}
void* nvkms_get_per_open_data(int fd)
{
struct file *filp = fget(fd);
struct nvkms_per_open *popen = NULL;
dev_t rdev = 0;
void *data = NULL;
if (filp == NULL) {
return NULL;
}
if (filp->f_inode == NULL) {
goto done;
}
rdev = filp->f_inode->i_rdev;
if ((MAJOR(rdev) != NVKMS_MAJOR_DEVICE_NUMBER) ||
(MINOR(rdev) != NVKMS_MINOR_DEVICE_NUMBER)) {
goto done;
}
popen = filp->private_data;
if (popen == NULL) {
goto done;
}
data = popen->data;
done:
/*
* fget() incremented the struct file's reference count, which
* needs to be balanced with a call to fput(). It is safe to
* decrement the reference count before returning
* filp->private_data because core NVKMS is currently holding the
* nvkms_lock, which prevents the nvkms_close() => nvKmsClose()
* call chain from freeing the file out from under the caller of
* nvkms_get_per_open_data().
*/
fput(filp);
return data;
}
NvBool nvkms_fd_is_nvidia_chardev(int fd)
{
struct file *filp = fget(fd);
@@ -1214,6 +1268,11 @@ nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv,
#if defined(NV_ACPI_VIDEO_BACKLIGHT_USE_NATIVE)
if (!acpi_video_backlight_use_native()) {
#if defined(NV_ACPI_VIDEO_REGISTER_BACKLIGHT)
nvkms_log(NVKMS_LOG_LEVEL_INFO, NVKMS_LOG_PREFIX,
"ACPI reported no NVIDIA native backlight available; attempting to use ACPI backlight.");
acpi_video_register_backlight();
#endif
return NULL;
}
#endif
@@ -1288,8 +1347,9 @@ static void nvkms_kapi_event_kthread_q_callback(void *arg)
nvKmsKapiHandleEventQueueChange(device);
}
struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type,
static struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type,
struct NvKmsKapiDevice *device,
NvBool interruptible,
int *status)
{
struct nvkms_per_open *popen = NULL;
@@ -1303,10 +1363,13 @@ struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type,
popen->type = type;
*status = down_interruptible(&nvkms_lock);
if (*status != 0) {
goto failed;
if (interruptible) {
*status = down_interruptible(&nvkms_lock);
if (*status != 0) {
goto failed;
}
} else {
down(&nvkms_lock);
}
popen->data = nvKmsOpen(current->tgid, type, popen);
@@ -1340,7 +1403,7 @@ failed:
return NULL;
}
void nvkms_close_pm_locked(struct nvkms_per_open *popen)
static void nvkms_close_pm_locked(struct nvkms_per_open *popen)
{
/*
* Don't use down_interruptible(): we need to free resources
@@ -1403,18 +1466,22 @@ static void nvkms_close_popen(struct nvkms_per_open *popen)
}
}
int nvkms_ioctl_common
static int nvkms_ioctl_common
(
struct nvkms_per_open *popen,
NvU32 cmd, NvU64 address, const size_t size
NvU32 cmd, NvU64 address, const size_t size,
NvBool interruptible
)
{
int status;
NvBool ret;
status = down_interruptible(&nvkms_lock);
if (status != 0) {
return status;
if (interruptible) {
int status = down_interruptible(&nvkms_lock);
if (status != 0) {
return status;
}
} else {
down(&nvkms_lock);
}
if (popen->data != NULL) {
@@ -1441,7 +1508,10 @@ struct nvkms_per_open* nvkms_open_from_kapi
struct nvkms_per_open *ret;
nvkms_read_lock_pm_lock();
ret = nvkms_open_common(NVKMS_CLIENT_KERNEL_SPACE, device, &status);
ret = nvkms_open_common(NVKMS_CLIENT_KERNEL_SPACE,
device,
NV_FALSE /* interruptible */,
&status);
nvkms_read_unlock_pm_lock();
return ret;
@@ -1452,6 +1522,28 @@ void nvkms_close_from_kapi(struct nvkms_per_open *popen)
nvkms_close_pm_unlocked(popen);
}
NvBool nvkms_ioctl_from_kapi_try_pmlock
(
struct nvkms_per_open *popen,
NvU32 cmd, void *params_address, const size_t param_size
)
{
NvBool ret;
// XXX PM lock must be allowed to fail, see bug 4432810.
if (nvkms_read_trylock_pm_lock()) {
return NV_FALSE;
}
ret = nvkms_ioctl_common(popen,
cmd,
(NvU64)(NvUPtr)params_address, param_size,
NV_FALSE /* interruptible */) == 0;
nvkms_read_unlock_pm_lock();
return ret;
}
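A hedged caller sketch for the trylock variant (illustrative only): an NV_FALSE return means either that the PM lock was contended, e.g. during suspend, or that the ioctl itself failed, so callers must be prepared to defer or retry:

/* Hypothetical KAPI caller: never block on the PM lock here. */
if (!nvkms_ioctl_from_kapi_try_pmlock(popen, cmd, &params, sizeof(params))) {
    /* Either suspend is in progress (lock contended) or the ioctl
     * failed; defer the work rather than blocking. */
}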
NvBool nvkms_ioctl_from_kapi
(
struct nvkms_per_open *popen,
@@ -1463,7 +1555,8 @@ NvBool nvkms_ioctl_from_kapi
nvkms_read_lock_pm_lock();
ret = nvkms_ioctl_common(popen,
cmd,
(NvU64)(NvUPtr)params_address, param_size) == 0;
(NvU64)(NvUPtr)params_address, param_size,
NV_FALSE /* interruptible */) == 0;
nvkms_read_unlock_pm_lock();
return ret;
@@ -1615,6 +1708,7 @@ static void nvkms_proc_exit(void)
/*************************************************************************
* NVKMS Config File Read
************************************************************************/
#if NVKMS_CONFIG_FILE_SUPPORTED
static NvBool nvkms_fs_mounted(void)
{
return current->fs != NULL;
@@ -1631,10 +1725,10 @@ static size_t nvkms_config_file_open
struct inode *file_inode;
size_t file_size = 0;
size_t read_size = 0;
#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
loff_t pos = 0;
#endif
*buff = NULL;
if (!nvkms_fs_mounted()) {
printk(KERN_ERR NVKMS_LOG_PREFIX "ERROR: Filesystems not mounted\n");
return 0;
@@ -1658,6 +1752,11 @@ static size_t nvkms_config_file_open
goto done;
}
// Do not alloc a 0 sized buffer
if (file_size == 0) {
goto done;
}
*buff = nvkms_alloc(file_size, NV_FALSE);
if (*buff == NULL) {
printk(KERN_WARNING NVKMS_LOG_PREFIX "WARNING: Out of memory\n");
@@ -1669,14 +1768,8 @@ static size_t nvkms_config_file_open
* kernel_read_file for kernels >= 4.6
*/
while ((read_size < file_size) && (i++ < NVKMS_READ_FILE_MAX_LOOPS)) {
#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG)
ssize_t ret = kernel_read(file, *buff + read_size,
file_size - read_size, &pos);
#else
ssize_t ret = kernel_read(file, read_size,
*buff + read_size,
file_size - read_size);
#endif
if (ret <= 0) {
break;
}
@@ -1722,6 +1815,11 @@ static void nvkms_read_config_file_locked(void)
nvkms_free(buffer, buf_size);
}
#else
static void nvkms_read_config_file_locked(void)
{
}
#endif
/*************************************************************************
* NVKMS KAPI functions
@@ -1736,6 +1834,48 @@ NvBool nvKmsKapiGetFunctionsTable
}
EXPORT_SYMBOL(nvKmsKapiGetFunctionsTable);
NvU32 nvKmsKapiF16ToF32(NvU16 a)
{
return nvKmsKapiF16ToF32Internal(a);
}
EXPORT_SYMBOL(nvKmsKapiF16ToF32);
NvU16 nvKmsKapiF32ToF16(NvU32 a)
{
return nvKmsKapiF32ToF16Internal(a);
}
EXPORT_SYMBOL(nvKmsKapiF32ToF16);
NvU32 nvKmsKapiF32Mul(NvU32 a, NvU32 b)
{
return nvKmsKapiF32MulInternal(a, b);
}
EXPORT_SYMBOL(nvKmsKapiF32Mul);
NvU32 nvKmsKapiF32Div(NvU32 a, NvU32 b)
{
return nvKmsKapiF32DivInternal(a, b);
}
EXPORT_SYMBOL(nvKmsKapiF32Div);
NvU32 nvKmsKapiF32Add(NvU32 a, NvU32 b)
{
return nvKmsKapiF32AddInternal(a, b);
}
EXPORT_SYMBOL(nvKmsKapiF32Add);
NvU32 nvKmsKapiF32ToUI32RMinMag(NvU32 a, NvBool exact)
{
return nvKmsKapiF32ToUI32RMinMagInternal(a, exact);
}
EXPORT_SYMBOL(nvKmsKapiF32ToUI32RMinMag);
NvU32 nvKmsKapiUI32ToF32(NvU32 a)
{
return nvKmsKapiUI32ToF32Internal(a);
}
EXPORT_SYMBOL(nvKmsKapiUI32ToF32);
/*************************************************************************
* File operation callback functions.
*************************************************************************/
@@ -1750,7 +1890,10 @@ static int nvkms_open(struct inode *inode, struct file *filp)
}
filp->private_data =
nvkms_open_common(NVKMS_CLIENT_USER_SPACE, NULL, &status);
nvkms_open_common(NVKMS_CLIENT_USER_SPACE,
NULL,
NV_TRUE /* interruptible */,
&status);
nvkms_read_unlock_pm_lock();
@@ -1809,13 +1952,20 @@ static int nvkms_ioctl(struct inode *inode, struct file *filp,
status = nvkms_ioctl_common(popen,
params.cmd,
params.address,
params.size);
params.size,
NV_TRUE /* interruptible */);
nvkms_read_unlock_pm_lock();
return status;
}
static long nvkms_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return nvkms_ioctl(filp->f_inode, filp, cmd, arg);
}
static unsigned int nvkms_poll(struct file *filp, poll_table *wait)
{
unsigned int mask = 0;
@@ -1843,17 +1993,73 @@ static unsigned int nvkms_poll(struct file *filp, poll_table *wait)
* Module loading support code.
*************************************************************************/
static nvidia_module_t nvidia_modeset_module = {
#define NVKMS_RDEV (MKDEV(NV_MAJOR_DEVICE_NUMBER, \
NV_MINOR_DEVICE_NUMBER_MODESET_DEVICE))
static struct file_operations nvkms_fops = {
.owner = THIS_MODULE,
.module_name = "nvidia-modeset",
.instance = 1, /* minor number: 255-1=254 */
.open = nvkms_open,
.close = nvkms_close,
.mmap = nvkms_mmap,
.ioctl = nvkms_ioctl,
.poll = nvkms_poll,
.unlocked_ioctl = nvkms_unlocked_ioctl,
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
.compat_ioctl = nvkms_unlocked_ioctl,
#endif
.mmap = nvkms_mmap,
.open = nvkms_open,
.release = nvkms_close,
};
static struct cdev nvkms_device_cdev;
static int __init nvkms_register_chrdev(void)
{
int ret;
ret = register_chrdev_region(NVKMS_RDEV, 1, "nvidia-modeset");
if (ret < 0) {
return ret;
}
cdev_init(&nvkms_device_cdev, &nvkms_fops);
ret = cdev_add(&nvkms_device_cdev, NVKMS_RDEV, 1);
if (ret < 0) {
unregister_chrdev_region(NVKMS_RDEV, 1);
return ret;
}
return ret;
}
static void nvkms_unregister_chrdev(void)
{
cdev_del(&nvkms_device_cdev);
unregister_chrdev_region(NVKMS_RDEV, 1);
}
void* nvkms_get_per_open_data(int fd)
{
struct file *filp = fget(fd);
void *data = NULL;
if (filp) {
if (filp->f_op == &nvkms_fops && filp->private_data) {
struct nvkms_per_open *popen = filp->private_data;
data = popen->data;
}
/*
* fget() incremented the struct file's reference count, which needs to
* be balanced with a call to fput(). It is safe to decrement the
* reference count before returning filp->private_data because core
* NVKMS is currently holding the nvkms_lock, which prevents the
* nvkms_close() => nvKmsClose() call chain from freeing the file out
* from under the caller of nvkms_get_per_open_data().
*/
fput(filp);
}
return data;
}
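The f_op comparison above replaces the old major/minor check; a generic sketch of that identity idiom follows (function name invented for illustration):

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/types.h>

/* An fd belongs to a driver iff its struct file uses that driver's fops. */
static bool fd_uses_fops(int fd, const struct file_operations *ours)
{
    struct file *filp = fget(fd);
    bool match = false;

    if (filp) {
        match = (filp->f_op == ours);
        fput(filp); /* balance the fget() reference */
    }
    return match;
}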
static int __init nvkms_init(void)
{
int ret;
@@ -1884,10 +2090,9 @@ static int __init nvkms_init(void)
INIT_LIST_HEAD(&nvkms_timers.list);
spin_lock_init(&nvkms_timers.lock);
ret = nvidia_register_module(&nvidia_modeset_module);
ret = nvkms_register_chrdev();
if (ret != 0) {
goto fail_register_module;
goto fail_register_chrdev;
}
down(&nvkms_lock);
@@ -1906,8 +2111,8 @@ static int __init nvkms_init(void)
return 0;
fail_module_load:
nvidia_unregister_module(&nvidia_modeset_module);
fail_register_module:
nvkms_unregister_chrdev();
fail_register_chrdev:
nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
fail_deferred_close_kthread:
nv_kthread_q_stop(&nvkms_kthread_q);
@@ -1945,7 +2150,11 @@ restart:
* completion, and we wait for queue completion with
* nv_kthread_q_stop below.
*/
#if !defined(NV_BSD) && NV_IS_EXPORT_SYMBOL_PRESENT_timer_delete_sync
if (timer_delete_sync(&timer->kernel_timer) == 1) {
#else
if (del_timer_sync(&timer->kernel_timer) == 1) {
#endif
/* We've deactivated the timer, so we need to clean up after it */
list_del(&timer->timers_list);
@@ -1971,7 +2180,7 @@ restart:
nv_kthread_q_stop(&nvkms_deferred_close_kthread_q);
nv_kthread_q_stop(&nvkms_kthread_q);
nvidia_unregister_module(&nvidia_modeset_module);
nvkms_unregister_chrdev();
nvkms_free_rm();
if (malloc_verbose) {

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -67,6 +67,14 @@ enum NvKmsSyncPtOp {
NVKMS_SYNCPT_OP_READ_MINVAL,
};
enum NvKmsDebugForceColorSpace {
NVKMS_DEBUG_FORCE_COLOR_SPACE_NONE,
NVKMS_DEBUG_FORCE_COLOR_SPACE_RGB,
NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV444,
NVKMS_DEBUG_FORCE_COLOR_SPACE_YUV422,
NVKMS_DEBUG_FORCE_COLOR_SPACE_MAX,
};
typedef struct {
struct {
@@ -96,8 +104,21 @@ typedef struct {
} read_minval;
} NvKmsSyncPtOpParams;
enum FailAllocCoreChannelMethod {
FAIL_ALLOC_CORE_CHANNEL_RM_SETUP_CORE_CHANNEL = 0,
FAIL_ALLOC_CORE_CHANNEL_RESTORE_CONSOLE = 1,
};
NvBool nvkms_test_fail_alloc_core_channel(enum FailAllocCoreChannelMethod method);
NvBool nvkms_conceal_vrr_caps(void);
NvBool nvkms_output_rounding_fix(void);
NvBool nvkms_disable_hdmi_frl(void);
NvBool nvkms_disable_vrr_memclk_switch(void);
NvBool nvkms_hdmi_deepcolor(void);
NvBool nvkms_vblank_sem_control(void);
NvBool nvkms_opportunistic_display_sync(void);
enum NvKmsDebugForceColorSpace nvkms_debug_force_color_space(void);
NvBool nvkms_enable_overlay_layers(void);
void nvkms_call_rm (void *ops);
void* nvkms_alloc (size_t size,
@@ -329,6 +350,16 @@ NvBool nvkms_ioctl_from_kapi
NvU32 cmd, void *params_address, const size_t params_size
);
/*!
* Like nvkms_ioctl_from_kapi, but returns NV_FALSE instead of waiting if the
* power management read lock cannot be acquired.
*/
NvBool nvkms_ioctl_from_kapi_try_pmlock
(
struct nvkms_per_open *popen,
NvU32 cmd, void *params_address, const size_t params_size
);
/*!
* APIs for locking.
*/

View File

@@ -40,9 +40,6 @@ NV_KERNEL_MODULE_TARGETS += $(NVIDIA_MODESET_KO)
NVIDIA_MODESET_BINARY_OBJECT := $(src)/nvidia-modeset/nv-modeset-kernel.o_binary
NVIDIA_MODESET_BINARY_OBJECT_O := nvidia-modeset/nv-modeset-kernel.o
quiet_cmd_symlink = SYMLINK $@
cmd_symlink = ln -sf $< $@
targets += $(NVIDIA_MODESET_BINARY_OBJECT_O)
$(obj)/$(NVIDIA_MODESET_BINARY_OBJECT_O): $(NVIDIA_MODESET_BINARY_OBJECT) FORCE
@@ -55,9 +52,21 @@ nvidia-modeset-y += $(NVIDIA_MODESET_BINARY_OBJECT_O)
# Define nvidia-modeset.ko-specific CFLAGS.
#
NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset
NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset -I$(src)/common/inc
NVIDIA_MODESET_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0
# Some Android kernels prohibit driver use of filesystem functions like
# filp_open() and kernel_read(). Disable the NVKMS_CONFIG_FILE_SUPPORTED
# functionality that uses those functions when building for Android.
PLATFORM_IS_ANDROID ?= 0
ifeq ($(PLATFORM_IS_ANDROID),1)
NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=0
else
NVIDIA_MODESET_CFLAGS += -DNVKMS_CONFIG_FILE_SUPPORTED=1
endif
$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_MODESET_OBJECTS), $(NVIDIA_MODESET_CFLAGS))
@@ -85,11 +94,10 @@ $(obj)/$(NVIDIA_MODESET_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_MODESET_OBJECTS
NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_MODESET_OBJECTS)
NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64
NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data
NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_backlight_use_native
NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_video_register_backlight
NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync

View File

@@ -66,6 +66,8 @@ enum NvKmsClientType {
NVKMS_CLIENT_KERNEL_SPACE,
};
struct NvKmsPerOpenDev;
NvBool nvKmsIoctl(
void *pOpenVoid,
NvU32 cmd,
@@ -101,7 +103,25 @@ NvBool nvKmsKapiGetFunctionsTableInternal
struct NvKmsKapiFunctionsTable *funcsTable
);
void nvKmsKapiSuspendResume(NvBool suspend);
NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness);
NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness);
NvBool nvKmsOpenDevHasSubOwnerPermissionOrBetter(const struct NvKmsPerOpenDev *pOpenDev);
NvU32 nvKmsKapiF16ToF32Internal(NvU16 a);
NvU16 nvKmsKapiF32ToF16Internal(NvU32 a);
NvU32 nvKmsKapiF32MulInternal(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32DivInternal(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32AddInternal(NvU32 a, NvU32 b);
NvU32 nvKmsKapiF32ToUI32RMinMagInternal(NvU32 a, NvBool exact);
NvU32 nvKmsKapiUI32ToF32Internal(NvU32 a);
#endif /* __NV_KMS_H__ */

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -21,27 +21,27 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_FRONTEND_H_
#define _NV_FRONTEND_H_
#ifndef __DETECT_SELF_HOSTED_H__
#define __DETECT_SELF_HOSTED_H__
#include "nvtypes.h"
#include "nv-linux.h"
#include "nv-register-module.h"
#define NV_MAX_MODULE_INSTANCES 8
#define NV_FRONTEND_MINOR_NUMBER(x) minor((x)->i_rdev)
static inline int pci_devid_is_self_hosted_hopper(unsigned short devid)
{
return devid >= 0x2340 && devid <= 0x237f; // GH100 Self-Hosted
}
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX 255
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN (NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - \
NV_MAX_MODULE_INSTANCES)
static inline int pci_devid_is_self_hosted_blackwell(unsigned short devid)
{
return (devid >= 0x2940 && devid <= 0x297f) // GB100 Self-Hosted
|| (devid >= 0x31c0 && devid <= 0x31ff); // GB110 Self-Hosted
}
#define NV_FRONTEND_IS_CONTROL_DEVICE(x) ((x <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX) && \
(x > NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN))
int nvidia_frontend_add_device(nvidia_module_t *, nv_linux_state_t *);
int nvidia_frontend_remove_device(nvidia_module_t *, nv_linux_state_t *);
extern nvidia_module_t *nv_minor_num_table[];
static inline int pci_devid_is_self_hosted(unsigned short devid)
{
return pci_devid_is_self_hosted_hopper(devid) ||
pci_devid_is_self_hosted_blackwell(devid)
;
}
#endif
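A hedged example of how a probe path might consume these helpers; pdev and the surrounding logic are assumptions for illustration:

/* Hypothetical probe-time check against the PCI device ID. */
if (pci_devid_is_self_hosted(pdev->device)) {
    /* Take the self-hosted (coherently attached) bring-up path. */
}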

View File

@@ -1,12 +1,14 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* Copyright 2021-2024 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
#ifndef CRYPTLIB_CERT_H
#define CRYPTLIB_CERT_H
#if LIBSPDM_CERT_PARSE_SUPPORT
/**
* Retrieve the tag and length of the tag.
*
@@ -33,10 +35,10 @@ extern bool libspdm_asn1_get_tag(uint8_t **ptr, const uint8_t *end, size_t *leng
* @param[in, out] subject_size The size in bytes of the cert_subject buffer on input,
* and the size of buffer returned cert_subject on output.
*
* @retval true The certificate subject retrieved successfully.
* @retval false Invalid certificate, or the subject_size is too small for the result.
* The subject_size will be updated with the required size.
* @retval false This interface is not supported.
* @retval true If the subject_size is not 0: the certificate subject was retrieved successfully.
* @retval true If the subject_size is 0: the certificate parsed successfully, but it has no subject.
* @retval false If the subject_size is not 0: the subject was found, but subject_size is too small for the result.
* @retval false If the subject_size is 0: invalid certificate.
**/
extern bool libspdm_x509_get_subject_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_subject,
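The size-based retval convention above implies the usual two-call pattern; a hedged sketch (cert and cert_size are assumed inputs, error handling elided):

/* Query the required size first, then fetch the subject. */
size_t subject_size = 0;
uint8_t *subject;

libspdm_x509_get_subject_name(cert, cert_size, NULL, &subject_size);
if (subject_size != 0) {
    subject = malloc(subject_size);
    if (subject != NULL &&
        libspdm_x509_get_subject_name(cert, cert_size, subject, &subject_size)) {
        /* subject now holds subject_size bytes of the DER-encoded name. */
    }
    free(subject);
}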
@@ -78,6 +80,25 @@ extern bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size
uint8_t *serial_number,
size_t *serial_number_size);
#if LIBSPDM_ADDITIONAL_CHECK_CERT
/**
* Retrieve the signature algorithm from one X.509 certificate.
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] oid Signature algorithm Object identifier buffer.
* @param[in,out] oid_size Signature algorithm Object identifier buffer size.
*
* @retval true If the oid_size is 0: the cert parsed successfully, but it has no signature algorithm.
* @retval true If the oid_size is not 0: the cert parsed and the signature algorithm was retrieved successfully.
* @retval false If the oid_size is 0: the cert parse failed.
* @retval false If the oid_size is not 0: the signature algorithm was retrieved, but the input buffer size is too small.
**/
extern bool libspdm_x509_get_signature_algorithm(const uint8_t *cert,
size_t cert_size, uint8_t *oid,
size_t *oid_size);
#endif /* LIBSPDM_ADDITIONAL_CHECK_CERT */
/**
* Retrieve the issuer bytes from one X.509 certificate.
*
@@ -91,10 +112,10 @@ extern bool libspdm_x509_get_serial_number(const uint8_t *cert, size_t cert_size
* @param[in, out] issuer_size The size in bytes of the cert_issuer buffer on input,
* and the size of buffer returned cert_issuer on output.
*
* @retval true The certificate issuer retrieved successfully.
* @retval false Invalid certificate, or the issuer_size is too small for the result.
* The issuer_size will be updated with the required size.
* @retval false This interface is not supported.
* @retval true If the issuer_size is not 0: the certificate issuer was retrieved successfully.
* @retval true If the issuer_size is 0: the certificate parsed successfully, but it has no issuer.
* @retval false If the issuer_size is not 0: the issuer was found, but issuer_size is too small for the result.
* @retval false If the issuer_size is 0: invalid certificate.
**/
extern bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
uint8_t *cert_issuer,
@@ -110,8 +131,11 @@ extern bool libspdm_x509_get_issuer_name(const uint8_t *cert, size_t cert_size,
* @param[out] extension_data Extension bytes.
* @param[in, out] extension_data_size Extension bytes size.
*
* @retval true
* @retval false
* @retval true If the returned extension_data_size == 0, it means that cert and oid are valid, but the oid extension is not found;
* If the returned extension_data_size != 0, it means that cert and oid are valid, and the oid extension is found;
* @retval false If the returned extension_data_size == 0, it means that cert or oid are invalid;
* If the returned extension_data_size != 0, it means that cert and oid are valid, and the oid extension is found,
* but the store buffer is too small.
**/
extern bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_size,
const uint8_t *oid, size_t oid_size,
@@ -135,9 +159,14 @@ extern bool libspdm_x509_get_extension_data(const uint8_t *cert, size_t cert_siz
* Note: use libspdm_x509_compare_date_time to compare date_time objects, and
* x509SetDateTime to get a date_time object from a date_time_str
*
* @retval true The certificate Validity retrieved successfully.
* @retval false Invalid certificate, or Validity retrieve failed.
* @retval false This interface is not supported.
* @retval true If the from_size and to_size are not 0:
* the certificate validity was retrieved successfully.
* @retval true If the from_size and to_size are 0:
* the certificate validity does not exist.
* @retval false If the from_size and to_size are not 0:
* the validity was retrieved, but the input buffer size is too small.
* @retval false If the from_size and to_size are 0:
* invalid certificate, or validity retrieval failed.
**/
extern bool libspdm_x509_get_validity(const uint8_t *cert, size_t cert_size,
uint8_t *from, size_t *from_size, uint8_t *to,
@@ -187,9 +216,9 @@ extern int32_t libspdm_x509_compare_date_time(const void *date_time1, const void
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] usage Key usage (LIBSPDM_CRYPTO_X509_KU_*)
*
* @retval true The certificate key usage retrieved successfully.
* @retval false Invalid certificate, or usage is NULL
* @retval false This interface is not supported.
* @retval true If the usage is not 0: the certificate key usage was retrieved successfully.
* @retval true If the usage is 0: the certificate parsed successfully, but it has no key usage.
* @retval false Invalid certificate, or usage is NULL.
**/
extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, size_t *usage);
@@ -199,10 +228,13 @@ extern bool libspdm_x509_get_key_usage(const uint8_t *cert, size_t cert_size, si
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] usage Key usage bytes.
* @param[in, out] usage_size Key usage buffer sizs in bytes.
* @param[in, out] usage_size Key usage buffer size in bytes.
*
* @retval true
* @retval false
* @retval true If the returned usage_size == 0, it means that cert and oid are valid, but the Extended key usage is not found;
* If the returned usage_size != 0, it means that cert and oid are valid, and the Extended key usage is found;
* @retval false If the returned usage_size == 0, it means that cert or oid are invalid;
* If the returned usage_size != 0, it means that cert and oid are valid, and the Extended key usage is found,
* but the store buffer is too small.
**/
extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
size_t cert_size, uint8_t *usage,
@@ -214,10 +246,13 @@ extern bool libspdm_x509_get_extended_key_usage(const uint8_t *cert,
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] basic_constraints Basic constraints bytes.
* @param[in, out] basic_constraints_size Basic constraints buffer sizs in bytes.
* @param[in, out] basic_constraints_size Basic constraints buffer size in bytes.
*
* @retval true
* @retval false
* @retval true If the returned basic_constraints_size == 0, it means that cert and oid are valid, but the basic_constraints is not found;
* If the returned basic_constraints_size != 0, it means that cert and oid are valid, and the basic_constraints is found;
* @retval false If the returned basic_constraints_size == 0, it means that cert or oid are invalid;
* If the returned basic_constraints_size != 0, it means that cert and oid are valid, and the basic_constraints is found,
* but the store buffer is too small.
**/
extern bool libspdm_x509_get_extended_basic_constraints(const uint8_t *cert,
size_t cert_size,
@@ -250,16 +285,16 @@ extern bool libspdm_x509_verify_cert(const uint8_t *cert, size_t cert_size,
*
* @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
* where the first certificate is signed by the Root
* Certificate or is the Root Cerificate itself. and
* subsequent cerificate is signed by the preceding
* cerificate.
* Certificate or is the Root Certificate itself. and
* subsequent certificate is signed by the preceding
* certificate.
* @param[in] cert_chain_length Total length of the certificate chain, in bytes.
*
* @param[in] root_cert Trusted Root Certificate buffer.
*
* @param[in] root_cert_length Trusted Root Certificate buffer length.
*
* @retval true All cerificates were issued by the first certificate in X509Certchain.
* @retval true All certificates were issued by the first certificate in X509Certchain.
* @retval false Invalid certificate or the certificate was not issued by the given
* trusted CA.
**/
@@ -272,12 +307,12 @@ extern bool libspdm_x509_verify_cert_chain(const uint8_t *root_cert, size_t root
*
* @param[in] cert_chain One or more ASN.1 DER-encoded X.509 certificates
* where the first certificate is signed by the Root
* Certificate or is the Root Cerificate itself. and
* subsequent cerificate is signed by the preceding
* cerificate.
* Certificate or is the Root Certificate itself. and
* subsequent certificate is signed by the preceding
* certificate.
* @param[in] cert_chain_length Total length of the certificate chain, in bytes.
*
* @param[in] cert_index Index of certificate. If index is -1 indecate the
* @param[in] cert_index Index of certificate. If index is -1 indicates the
* last certificate in cert_chain.
*
* @param[out] cert The certificate at the index of cert_chain.
@@ -301,7 +336,7 @@ extern bool libspdm_x509_get_cert_from_cert_chain(const uint8_t *cert_chain,
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] rsa_context Pointer to new-generated RSA context which contain the retrieved
* @param[out] rsa_context Pointer to newly generated RSA context which contain the retrieved
* RSA public key component. Use libspdm_rsa_free() function to free the
* resource.
*
@@ -319,7 +354,7 @@ extern bool libspdm_rsa_get_public_key_from_x509(const uint8_t *cert, size_t cer
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] ec_context Pointer to new-generated EC DSA context which contain the retrieved
* @param[out] ec_context Pointer to newly generated EC DSA context which contain the retrieved
* EC public key component. Use libspdm_ec_free() function to free the
* resource.
*
@@ -340,7 +375,7 @@ extern bool libspdm_ec_get_public_key_from_x509(const uint8_t *cert, size_t cert
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] ecd_context Pointer to new-generated Ed DSA context which contain the retrieved
* @param[out] ecd_context Pointer to newly generated Ed DSA context which contain the retrieved
* Ed public key component. Use libspdm_ecd_free() function to free the
* resource.
*
@@ -361,7 +396,7 @@ extern bool libspdm_ecd_get_public_key_from_x509(const uint8_t *cert, size_t cer
*
* @param[in] cert Pointer to the DER-encoded X509 certificate.
* @param[in] cert_size Size of the X509 certificate in bytes.
* @param[out] sm2_context Pointer to new-generated sm2 context which contain the retrieved
* @param[out] sm2_context Pointer to newly generated sm2 context which contain the retrieved
* sm2 public key component. Use sm2_free() function to free the
* resource.
*
@@ -376,41 +411,6 @@ extern bool libspdm_sm2_get_public_key_from_x509(const uint8_t *cert, size_t cer
void **sm2_context);
#endif /* LIBSPDM_SM2_DSA_SUPPORT */
#if LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP
/**
* Generate a CSR.
*
* @param[in] hash_nid hash algo for sign
* @param[in] asym_nid asym algo for sign
*
* @param[in] requester_info requester info to gen CSR
* @param[in] requester_info_length The len of requester info
*
* @param[in] context Pointer to asymmetric context
* @param[in] subject_name Subject name: should be break with ',' in the middle
* example: "C=AA,CN=BB"
*
* Subject names should contain a comma-separated list of OID types and values:
* The valid OID type name is in:
* {"CN", "commonName", "C", "countryName", "O", "organizationName","L",
* "OU", "organizationalUnitName", "ST", "stateOrProvinceName", "emailAddress",
* "serialNumber", "postalAddress", "postalCode", "dnQualifier", "title",
* "SN","givenName","GN", "initials", "pseudonym", "generationQualifier", "domainComponent", "DC"}.
* Note: The object of C and countryName should be CSR Supported Country Codes
*
* @param[in] csr_len For input, csr_len is the size of store CSR buffer.
* For output, csr_len is CSR len for DER format
* @param[in] csr_pointer For input, csr_pointer is buffer address to store CSR.
* For output, csr_pointer is address for stored CSR.
* The csr_pointer address will be changed.
*
* @retval true Success.
* @retval false Failed to gen CSR.
**/
extern bool libspdm_gen_x509_csr(size_t hash_nid, size_t asym_nid,
uint8_t *requester_info, size_t requester_info_length,
void *context, char *subject_name,
size_t *csr_len, uint8_t **csr_pointer);
#endif /* LIBSPDM_ENABLE_CAPABILITY_GET_CSR_CAP */
#endif /* LIBSPDM_CERT_PARSE_SUPPORT */
#endif /* CRYPTLIB_CERT_H */

View File

@@ -28,6 +28,44 @@ extern void *libspdm_ec_new_by_nid(size_t nid);
* @param[in] ec_context Pointer to the EC context to be released.
**/
extern void libspdm_ec_free(void *ec_context);
#if LIBSPDM_FIPS_MODE
/**
* Sets the private key component into the established EC context.
*
* For P-256, the private_key_size is 32 bytes.
* For P-384, the private_key_size is 48 bytes.
* For P-521, the private_key_size is 66 bytes.
*
* @param[in, out] ec_context Pointer to EC context being set.
* @param[in] private_key Pointer to the private key buffer.
* @param[in] private_key_size The size of private key buffer in bytes.
*
* @retval true EC private key component was set successfully.
* @retval false Invalid EC private key component.
*
**/
extern bool libspdm_ec_set_priv_key(void *ec_context, const uint8_t *private_key,
size_t private_key_size);
/**
* Sets the public key component into the established EC context.
*
* For P-256, the public_key_size is 64: the first 32 bytes are X, the second 32 bytes are Y.
* For P-384, the public_key_size is 96: the first 48 bytes are X, the second 48 bytes are Y.
* For P-521, the public_key_size is 132: the first 66 bytes are X, the second 66 bytes are Y.
*
* @param[in, out] ec_context Pointer to EC context being set.
* @param[in] public_key Pointer to the buffer holding the public X,Y to set.
* @param[in] public_key_size The size of the public key buffer in bytes.
*
* @retval true EC public key component was set successfully.
* @retval false Invalid EC public key component.
**/
extern bool libspdm_ec_set_pub_key(void *ec_context, const uint8_t *public_key,
size_t public_key_size);
#endif /* LIBSPDM_FIPS_MODE */
#endif /* (LIBSPDM_ECDHE_SUPPORT) || (LIBSPDM_ECDSA_SUPPORT) */
#if LIBSPDM_ECDHE_SUPPORT
@@ -99,6 +137,29 @@ extern bool libspdm_ec_compute_key(void *ec_context, const uint8_t *peer_public,
#endif /* LIBSPDM_ECDHE_SUPPORT */
#if LIBSPDM_ECDSA_SUPPORT
/**
* Generates Elliptic Curve context from DER-encoded public key data.
*
* The public key is ASN.1 DER-encoded as RFC7250 describes,
* namely, the SubjectPublicKeyInfo structure of an X.509 certificate.
*
* @param[in] der_data Pointer to the DER-encoded public key data.
* @param[in] der_size Size of the DER-encoded public key data in bytes.
* @param[out] ec_context Pointer to newly generated EC context which contains the
* EC public key component.
* Use libspdm_ec_free() function to free the resource.
*
* If der_data is NULL, then return false.
* If ec_context is NULL, then return false.
*
* @retval true EC context was generated successfully.
* @retval false Invalid DER public key data.
*
**/
extern bool libspdm_ec_get_public_key_from_der(const uint8_t *der_data,
size_t der_size,
void **ec_context);
/**
* Carries out the EC-DSA signature.
*
@@ -132,6 +193,29 @@ extern bool libspdm_ecdsa_sign(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
#if LIBSPDM_FIPS_MODE
/**
* Carries out the EC-DSA signature with a caller-supplied random function. This API can be used for FIPS tests.
*
* @param[in] ec_context Pointer to EC context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size Size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive EC-DSA signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
* @param[in] random_func Random number generation callback.
*
* @retval true signature successfully generated in EC-DSA.
* @retval false signature generation failed.
* @retval false sig_size is too small.
**/
extern bool libspdm_ecdsa_sign_ex(void *ec_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size,
int (*random_func)(void *, unsigned char *, size_t));
#endif/*LIBSPDM_FIPS_MODE*/
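The random_func parameter lets known-answer tests supply deterministic entropy; a hedged sketch of a callback matching the signature above (the 0-on-success return convention is an assumption to verify against the crypto backend in use):

#include <stddef.h>

/* Deterministic RNG stub for FIPS known-answer testing only. */
static int kat_random(void *ctx, unsigned char *out, size_t len)
{
    size_t i;

    (void)ctx; /* opaque caller state, unused here */
    for (i = 0; i < len; i++) {
        out[i] = (unsigned char)(i & 0xff);
    }
    return 0; /* assumed success convention */
}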
/**
* Verifies the EC-DSA signature.
*

View File

@@ -22,6 +22,29 @@
**/
extern void *libspdm_ecd_new_by_nid(size_t nid);
/**
* Generates Edwards-Curve context from DER-encoded public key data.
*
* The public key is ASN.1 DER-encoded as RFC7250 describes,
* namely, the SubjectPublicKeyInfo structure of an X.509 certificate.
*
* @param[in] der_data Pointer to the DER-encoded public key data.
* @param[in] der_size Size of the DER-encoded public key data in bytes.
* @param[out] ecd_context Pointer to newly generated Ed context which contains the
* Ed public key component.
* Use libspdm_ecd_free() function to free the resource.
*
* If der_data is NULL, then return false.
* If ecd_context is NULL, then return false.
*
* @retval true Ed context was generated successfully.
* @retval false Invalid DER public key data.
*
**/
extern bool libspdm_ecd_get_public_key_from_der(const uint8_t *der_data,
size_t der_size,
void **ecd_context);
/**
* Release the specified Ed context.
*
@@ -29,6 +52,56 @@ extern void *libspdm_ecd_new_by_nid(size_t nid);
**/
extern void libspdm_ecd_free(void *ecd_context);
/**
* Sets the public key component into the established Ed context.
*
* For ed25519, the public_size is 32.
* For ed448, the public_size is 57.
*
* @param[in, out] ecd_context Pointer to Ed context being set.
* @param[in] public_key Pointer to the buffer holding the public key to set.
* @param[in] public_key_size The size of the public key buffer in bytes.
*
* @retval true Ed public key component was set successfully.
* @retval false Invalid Ed public key component.
**/
extern bool libspdm_ecd_set_pub_key(void *ecd_context, const uint8_t *public_key,
size_t public_key_size);
/**
* Sets the private key component into the established Ed context.
*
* For ed25519, the private_size is 32.
* For ed448, the private_size is 57.
*
* @param[in, out] ecd_context Pointer to Ed context being set.
* @param[in] private_key Pointer to the buffer holding the private key to set.
* @param[in] private_key_size The size of the private key buffer in bytes.
*
* @retval true Ed private key component was set successfully.
* @retval false Invalid Ed private key component.
*
**/
bool libspdm_ecd_set_pri_key(void *ecd_context, const uint8_t *private_key,
size_t private_key_size);
/**
* Gets the public key component from the established Ed context.
*
* For ed25519, the public_size is 32.
* For ed448, the public_size is 57.
*
* @param[in, out] ecd_context Pointer to Ed context being set.
* @param[out] public_key Pointer to the buffer to receive the public key.
* @param[in, out] public_key_size On input, the size of the public key buffer in bytes.
* On output, the size of data returned in the public key buffer in bytes.
*
* @retval true Ed public key component was retrieved successfully.
* @retval false Invalid Ed public key component.
**/
extern bool libspdm_ecd_get_pub_key(void *ecd_context, uint8_t *public_key,
size_t *public_key_size);
/**
* Carries out the Ed-DSA signature.
*

View File

@@ -35,6 +35,28 @@ typedef enum {
**/
extern void *libspdm_rsa_new(void);
/**
* Generates RSA context from DER-encoded public key data.
*
* The public key is ASN.1 DER-encoded as RFC7250 describes,
* namely, the SubjectPublicKeyInfo structure of an X.509 certificate.
*
* @param[in] der_data Pointer to the DER-encoded public key data.
* @param[in] der_size Size of the DER-encoded public key data in bytes.
* @param[out] rsa_context Pointer to newly generated RSA context which contains the
* RSA public key component.
* Use libspdm_rsa_free() function to free the resource.
*
* If der_data is NULL, then return false.
* If rsa_context is NULL, then return false.
*
* @retval true RSA context was generated successfully.
* @retval false Invalid DER public key data.
**/
extern bool libspdm_rsa_get_public_key_from_der(const uint8_t *der_data,
size_t der_size,
void **rsa_context);
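A hedged usage sketch for the new DER helper; der and der_size are assumed to hold a SubjectPublicKeyInfo blob:

void *rsa_context = NULL;

if (libspdm_rsa_get_public_key_from_der(der, der_size, &rsa_context)) {
    /* ... verify signatures with the recovered public key ... */
    libspdm_rsa_free(rsa_context);
}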
/**
* Release the specified RSA context.
*
@@ -67,80 +89,6 @@ extern void libspdm_rsa_free(void *rsa_context);
extern bool libspdm_rsa_set_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
const uint8_t *big_number, size_t bn_size);
/**
* Gets the tag-designated RSA key component from the established RSA context.
*
* This function retrieves the tag-designated RSA key component from the
* established RSA context as a non-negative integer (octet string format
* represented in RSA PKCS#1).
* If specified key component has not been set or has been cleared, then returned
* bn_size is set to 0.
* If the big_number buffer is too small to hold the contents of the key, false
* is returned and bn_size is set to the required buffer size to obtain the key.
*
* If rsa_context is NULL, then return false.
* If bn_size is NULL, then return false.
* If bn_size is large enough but big_number is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] rsa_context Pointer to RSA context being set.
* @param[in] key_tag Tag of RSA key component being set.
* @param[out] big_number Pointer to octet integer buffer.
* @param[in, out] bn_size On input, the size of big number buffer in bytes.
* On output, the size of data returned in big number buffer in bytes.
*
* @retval true RSA key component was retrieved successfully.
* @retval false Invalid RSA key component tag.
* @retval false bn_size is too small.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_get_key(void *rsa_context, const libspdm_rsa_key_tag_t key_tag,
uint8_t *big_number, size_t *bn_size);
/**
* Generates RSA key components.
*
* This function generates RSA key components. It takes RSA public exponent E and
* length in bits of RSA modulus N as input, and generates all key components.
* If public_exponent is NULL, the default RSA public exponent (0x10001) will be used.
*
* If rsa_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in, out] rsa_context Pointer to RSA context being set.
* @param[in] modulus_length Length of RSA modulus N in bits.
* @param[in] public_exponent Pointer to RSA public exponent.
* @param[in] public_exponent_size Size of RSA public exponent buffer in bytes.
*
* @retval true RSA key component was generated successfully.
* @retval false Invalid RSA key component tag.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_generate_key(void *rsa_context, size_t modulus_length,
const uint8_t *public_exponent,
size_t public_exponent_size);
/**
* Validates key components of RSA context.
* NOTE: This function performs integrity checks on all the RSA key material, so
* the RSA key structure must contain all the private key data.
*
* This function validates key components of RSA context in following aspects:
* - Whether p is a prime
* - Whether q is a prime
* - Whether n = p * q
* - Whether d*e = 1 mod lcm(p-1,q-1)
*
* If rsa_context is NULL, then return false.
* If this interface is not supported, then return false.
*
* @param[in] rsa_context Pointer to RSA context to check.
*
* @retval true RSA key components are valid.
* @retval false RSA key components are not valid.
* @retval false This interface is not supported.
**/
extern bool libspdm_rsa_check_key(void *rsa_context);
#endif /* (LIBSPDM_RSA_SSA_SUPPORT) || (LIBSPDM_RSA_PSS_SUPPORT) */
#if LIBSPDM_RSA_SSA_SUPPORT
@@ -260,5 +208,67 @@ extern bool libspdm_rsa_pss_sign(void *rsa_context, size_t hash_nid,
extern bool libspdm_rsa_pss_verify(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#if LIBSPDM_FIPS_MODE
/**
* Carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme for FIPS test.
*
* This function carries out the RSA-SSA signature generation with EMSA-PSS encoding scheme defined in
* RSA PKCS#1 v2.2 for FIPS test.
*
* The salt length is zero.
*
* If the signature buffer is too small to hold the contents of signature, false
* is returned and sig_size is set to the required buffer size to obtain the signature.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* The hash_size must match the hash_nid; nid can be SHA256, SHA384, SHA512, SHA3_256, SHA3_384, or SHA3_512.
* If sig_size is large enough but signature is NULL, then return false.
*
* @param[in] rsa_context Pointer to RSA context for signature generation.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be signed.
* @param[in] hash_size size of the message hash in bytes.
* @param[out] signature Pointer to buffer to receive RSA-SSA PSS signature.
* @param[in, out] sig_size On input, the size of signature buffer in bytes.
* On output, the size of data returned in signature buffer in bytes.
*
* @retval true signature successfully generated in RSA-SSA PSS.
* @retval false signature generation failed.
* @retval false sig_size is too small.
*
**/
extern bool libspdm_rsa_pss_sign_fips(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
uint8_t *signature, size_t *sig_size);
/**
* Verifies the RSA-SSA signature with EMSA-PSS encoding scheme defined in
* RSA PKCS#1 v2.2 for FIPS test.
*
* The salt length is zero.
*
* If rsa_context is NULL, then return false.
* If message_hash is NULL, then return false.
* If signature is NULL, then return false.
* The hash_size must match the hash_nid; nid can be SHA256, SHA384, SHA512, SHA3_256, SHA3_384, or SHA3_512.
*
* @param[in] rsa_context Pointer to RSA context for signature verification.
* @param[in] hash_nid hash NID
* @param[in] message_hash Pointer to octet message hash to be checked.
* @param[in] hash_size size of the message hash in bytes.
* @param[in] signature Pointer to RSA-SSA PSS signature to be verified.
* @param[in] sig_size size of signature in bytes.
*
* @retval true Valid signature encoded in RSA-SSA PSS.
* @retval false Invalid signature or invalid RSA context.
*
**/
extern bool libspdm_rsa_pss_verify_fips(void *rsa_context, size_t hash_nid,
const uint8_t *message_hash, size_t hash_size,
const uint8_t *signature, size_t sig_size);
#endif /*LIBSPDM_FIPS_MODE*/
#endif /* LIBSPDM_RSA_PSS_SUPPORT */
#endif /* CRYPTLIB_RSA_H */

View File

@@ -22,6 +22,29 @@
**/
extern void *libspdm_sm2_dsa_new_by_nid(size_t nid);
/**
* Generates Shang-Mi2 context from DER-encoded public key data.
*
* The public key is ASN.1 DER-encoded as RFC7250 describes,
* namely, the SubjectPublicKeyInfo structure of an X.509 certificate.
*
* @param[in] der_data Pointer to the DER-encoded public key data.
* @param[in] der_size Size of the DER-encoded public key data in bytes.
* @param[out] sm2_context Pointer to newly generated SM2 context which contains the
* SM2 public key component.
* Use libspdm_sm2_free() function to free the resource.
*
* If der_data is NULL, then return false.
* If sm2_context is NULL, then return false.
*
* @retval true SM2 context was generated successfully.
* @retval false Invalid DER public key data.
*
**/
extern bool libspdm_sm2_get_public_key_from_der(const uint8_t *der_data,
size_t der_size,
void **sm2_context);
/**
* Release the specified sm2 context.
*

View File

@@ -1,6 +1,6 @@
/**
* Copyright Notice:
* Copyright 2021-2022 DMTF. All rights reserved.
* Copyright 2021-2024 DMTF. All rights reserved.
* License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/libspdm/blob/main/LICENSE.md
**/
@@ -13,39 +13,6 @@
#include LIBSPDM_CONFIG
#endif
#if defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) && \
!defined(LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP)
#ifdef _MSC_VER
#pragma message("LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use " \
"LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a " \
"future release.")
#else
#warning LIBSPDM_ENABLE_SET_CERTIFICATE_CAP is deprecated. Use \
LIBSPDM_ENABLE_CAPABILITY_SET_CERTIFICATE_CAP instead. This warning will be removed in a \
future release.
#endif /* _MSC_VER */
#endif /* defined(LIBSPDM_ENABLE_SET_CERTIFICATE_CAP) */
#if defined(LIBSPDM_ENABLE_CHUNK_CAP) && !defined(LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP)
#ifdef _MSC_VER
#pragma message("LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP " \
"instead. This warning will be removed in a future release.")
#else
#warning LIBSPDM_ENABLE_CHUNK_CAP is deprecated. Use LIBSPDM_ENABLE_CAPABILITY_CHUNK_CAP \
instead. This warning will be removed in a future release.
#endif /* _MSC_VER */
#endif /* defined(LIBSPDM_ENABLE_CHUNK_CAP) */
#if defined(MDEPKG_NDEBUG) && !defined(LIBSPDM_DEBUG_ENABLE)
#ifdef _MSC_VER
#pragma message("MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE " \
"instead. This warning will be removed in a future release.")
#else
#warning MDEPKG_NDEBUG is deprecated. Use LIBSPDM_DEBUG_ENABLE \
instead. This warning will be removed in a future release.
#endif /* _MSC_VER */
#endif /* defined(MDEPKG_NDEBUG) */
#if defined(LIBSPDM_DEBUG_ENABLE)
#undef LIBSPDM_DEBUG_ASSERT_ENABLE
#undef LIBSPDM_DEBUG_PRINT_ENABLE
@@ -54,16 +21,69 @@
#define LIBSPDM_DEBUG_ASSERT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_PRINT_ENABLE (LIBSPDM_DEBUG_ENABLE)
#define LIBSPDM_DEBUG_BLOCK_ENABLE (LIBSPDM_DEBUG_ENABLE)
#elif defined(MDEPKG_NDEBUG)
#undef LIBSPDM_DEBUG_ASSERT_ENABLE
#undef LIBSPDM_DEBUG_PRINT_ENABLE
#undef LIBSPDM_DEBUG_BLOCK_ENABLE
#define LIBSPDM_DEBUG_ASSERT_ENABLE 0
#define LIBSPDM_DEBUG_PRINT_ENABLE 0
#define LIBSPDM_DEBUG_BLOCK_ENABLE 0
#endif /* defined(LIBSPDM_DEBUG_ENABLE) */
/* When in FIPS mode, only FIPS-approved algorithms are supported. */
#if LIBSPDM_FIPS_MODE
#undef LIBSPDM_SM2_DSA_P256_SUPPORT
#define LIBSPDM_SM2_DSA_P256_SUPPORT 0
#undef LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT
#define LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT 0
#undef LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT
#define LIBSPDM_AEAD_CHACHA20_POLY1305_SUPPORT 0
#undef LIBSPDM_AEAD_SM4_128_GCM_SUPPORT
#define LIBSPDM_AEAD_SM4_128_GCM_SUPPORT 0
#undef LIBSPDM_SM3_256_SUPPORT
#define LIBSPDM_SM3_256_SUPPORT 0
#endif /*LIBSPDM_FIPS_MODE*/
/* define crypto algorithm without parameter */
#define LIBSPDM_RSA_SSA_SUPPORT ((LIBSPDM_RSA_SSA_2048_SUPPORT) || \
(LIBSPDM_RSA_SSA_3072_SUPPORT) || \
(LIBSPDM_RSA_SSA_4096_SUPPORT))
#define LIBSPDM_RSA_PSS_SUPPORT ((LIBSPDM_RSA_PSS_2048_SUPPORT) || \
(LIBSPDM_RSA_PSS_3072_SUPPORT) || \
(LIBSPDM_RSA_PSS_4096_SUPPORT))
#define LIBSPDM_ECDSA_SUPPORT ((LIBSPDM_ECDSA_P256_SUPPORT) || \
(LIBSPDM_ECDSA_P384_SUPPORT) || \
(LIBSPDM_ECDSA_P521_SUPPORT))
#define LIBSPDM_SM2_DSA_SUPPORT (LIBSPDM_SM2_DSA_P256_SUPPORT)
#define LIBSPDM_EDDSA_SUPPORT ((LIBSPDM_EDDSA_ED25519_SUPPORT) || \
(LIBSPDM_EDDSA_ED448_SUPPORT))
#define LIBSPDM_FFDHE_SUPPORT ((LIBSPDM_FFDHE_2048_SUPPORT) || \
(LIBSPDM_FFDHE_3072_SUPPORT) || \
(LIBSPDM_FFDHE_4096_SUPPORT))
#define LIBSPDM_ECDHE_SUPPORT ((LIBSPDM_ECDHE_P256_SUPPORT) || \
(LIBSPDM_ECDHE_P384_SUPPORT) || \
(LIBSPDM_ECDHE_P521_SUPPORT))
#define LIBSPDM_SM2_KEY_EXCHANGE_SUPPORT (LIBSPDM_SM2_KEY_EXCHANGE_P256_SUPPORT)
#define LIBSPDM_AEAD_GCM_SUPPORT ((LIBSPDM_AEAD_AES_128_GCM_SUPPORT) || \
(LIBSPDM_AEAD_AES_256_GCM_SUPPORT))
#define LIBSPDM_AEAD_SM4_SUPPORT (LIBSPDM_AEAD_SM4_128_GCM_SUPPORT)
#define LIBSPDM_SHA2_SUPPORT ((LIBSPDM_SHA256_SUPPORT) || \
(LIBSPDM_SHA384_SUPPORT) || \
(LIBSPDM_SHA512_SUPPORT))
#define LIBSPDM_SHA3_SUPPORT ((LIBSPDM_SHA3_256_SUPPORT) || \
(LIBSPDM_SHA3_384_SUPPORT) || \
(LIBSPDM_SHA3_512_SUPPORT))
#define LIBSPDM_SM3_SUPPORT (LIBSPDM_SM3_256_SUPPORT)
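These derived macros let the rest of the library gate whole features on "any parameter set enabled"; an illustrative use, not taken from this diff:

#if LIBSPDM_ECDSA_SUPPORT
int spdm_uses_ecdsa = 1;   /* any of P-256/P-384/P-521 enabled */
#else
int spdm_uses_ecdsa = 0;
#endif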
#if LIBSPDM_CHECK_MACRO
#include "internal/libspdm_macro_check.h"
#endif /* LIBSPDM_CHECK_MACRO */

Some files were not shown because too many files have changed in this diff.