Mirror of git://nv-tegra.nvidia.com/tegra/kernel-src/nv-kernel-display-driver.git
Synced 2025-12-22 09:11:48 +03:00
Latest commit: Updating prebuilts and/or headers
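The listing below pairs each file path in the tree with a 40-character hexadecimal value. Assuming these are Git SHA-1 blob object IDs for the files at the given paths (the page does not state this explicitly), a checked-out file can be compared against its listed value with "git hash-object <path>", or with a short script such as the sketch below. The usage line reuses the kernel-open/Makefile entry from the listing purely as an example.

import hashlib
import sys

def git_blob_sha1(path):
    # Git hashes a blob as sha1(b"blob " + decimal size + b"\0" + file contents).
    with open(path, "rb") as f:
        data = f.read()
    header = b"blob %d\x00" % len(data)
    return hashlib.sha1(header + data).hexdigest()

if __name__ == "__main__":
    # Example (hypothetical script name, entry taken from the listing below):
    #   python check_blob.py kernel-open/Makefile 45b68e3eacda04dcadce48a8238574302a71a3ca
    path, expected = sys.argv[1], sys.argv[2]
    actual = git_blob_sha1(path)
    print("match" if actual == expected else "mismatch: computed %s" % actual)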
d13779dbbab1c776db15f462cd46b29f2c0f8c7c - Makefile 7d577fdb9594ae572ff38fdda682a4796ab832ca - COPYING 5728867ce2e96b63b29367be6aa1c0e47bcafc8f - SECURITY.md 6b73bf6a534ddc0f64e8ba88739381c3b7fb4b5c - nv-compiler.sh 05e911b99b109a721d2045f025b21189e2718e60 - README.md ec5f1eb408e0b650158e0310fb1ddd8e9b323a6f - CONTRIBUTING.md af3ee56442f16029cb9b13537477c384226b22fc - CODE_OF_CONDUCT.md 07bd07999f296d935386a8edf719d0e296f63227 - kernel-open/Kbuild 45b68e3eacda04dcadce48a8238574302a71a3ca - kernel-open/Makefile 99f4563141af1278f13cb23a6e6c24d21d583d7b - kernel-open/conftest.sh 0b1508742a1c5a04b6c3a4be1b48b506f4180848 - kernel-open/dkms.conf 19a5da412ce1557b721b8550a4a80196f6162ba6 - kernel-open/common/inc/os_dsi_panel_props.h 4750735d6f3b334499c81d499a06a654a052713d - kernel-open/common/inc/nv-caps.h 92de3baafe321dd0dcf8665aae4614d5ac670718 - kernel-open/common/inc/rs_access.h 60ef64c0f15526ae2d786e5cec07f28570f0663b - kernel-open/common/inc/conftest.h 880e45b68b19fdb91ac94991f0e6d7fc3b406b1f - kernel-open/common/inc/nv-pci-types.h 6d2f660ef0942edf664874f260266ec81cd0ff08 - kernel-open/common/inc/nvtypes.h c45b2faf17ca2a205c56daa11e3cb9d864be2238 - kernel-open/common/inc/nv-modeset-interface.h 5bc7a748c7d3dfa6559ca4f9fe6199e17098ec8f - kernel-open/common/inc/nv-lock.h b249abc0a7d0c9889008e98cb2f8515a9d310b85 - kernel-open/common/inc/nvgputypes.h e4a4f57abb8769d204468b2f5000c81f5ea7c92f - kernel-open/common/inc/nv-procfs.h 8b19b93e958aca626899f035334a4c96f8776eb6 - kernel-open/common/inc/nv.h ede1f77acb43e28391bceac058e00a7a8d799b0d - kernel-open/common/inc/nvmisc.h ae374d3e438f8d3b60df8c4602618c58564b73f9 - kernel-open/common/inc/rm-gpu-ops.h 3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - kernel-open/common/inc/nv-firmware-registry.h 5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - kernel-open/common/inc/dce_rm_client_ipc.h 3e8075872e2efa843b74b884ef5098468edc4f18 - kernel-open/common/inc/nvimpshared.h befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - kernel-open/common/inc/nv_stdarg.h 0e70d16576584082ee4c7f3ff9944f3bd107b1c1 - kernel-open/common/inc/cpuopsys.h d7ab0ee225361daacd280ff98848851933a10a98 - kernel-open/common/inc/nv-list-helpers.h b02c378ac0521c380fc2403f0520949f785b1db6 - kernel-open/common/inc/nv-dmabuf.h a3d1e51c0f4217f1dc4cb0c48aa0eafd054d4e5e - kernel-open/common/inc/nv-procfs-utils.h 81592e5c17bebad04cd11d73672c859baa070329 - kernel-open/common/inc/nv-chardev-numbers.h 61cf8f3fd32142dc402f6802b5d4c9af6c875c35 - kernel-open/common/inc/nv-firmware.h d5253e7e4abd3ad8d72375260aa80037adcd8973 - kernel-open/common/inc/nv_dpy_id.h 61a9589e4a8ec122e5a6c2258658d493ee747897 - kernel-open/common/inc/nv-platform.h b986bc6591ba17a74ad81ec4c93347564c6d5165 - kernel-open/common/inc/nvkms-format.h 4f487eccd762f3ca645a685d5c333ff569e7987c - kernel-open/common/inc/nv-kthread-q-os.h 4015c4557ea0790a2bdf5695832c89e31d75aee9 - kernel-open/common/inc/nvlimits.h 143051f69a53db0e7c5d2f846a9c14d666e264b4 - kernel-open/common/inc/nv-kref.h 56f432032bef4683c2801f46bec5065923475fb1 - kernel-open/common/inc/nv-kthread-q.h b4c5d759f035b540648117b1bff6b1701476a398 - kernel-open/common/inc/nvCpuUuid.h 67a9707c568e167bae4404c7785ed614babb7b82 - kernel-open/common/inc/nv-linux.h 7c7888550b12eeb98128ea9ac771b897327f538e - kernel-open/common/inc/nv-hypervisor.h f9cb3701681994ff6f32833892d900b0da2b89f6 - kernel-open/common/inc/nv-pgprot.h b8700a911ac85770bf25d70b9692308af63966bd - kernel-open/common/inc/nvstatuscodes.h 3a5f4f105672921b857fec7f2b577d9d525afe37 - kernel-open/common/inc/nv-timer.h 
5cd0b3f9c7f544e9064efc9b5ba4f297e5494315 - kernel-open/common/inc/nv-time.h 7a78f354e0b68f03d6ab566d5b755e299456f361 - kernel-open/common/inc/os_gpio.h 154abd192eb950fecffcca470ee80b27f224fd79 - kernel-open/common/inc/nv-proto.h 2eb11e523a3ecba2dcd68f3146e1e666a44256ae - kernel-open/common/inc/nv-ioctl.h 1328058925b64e97588d670fe70466b31af7c7c1 - kernel-open/common/inc/nv-mm.h 25d89847c11449b329941a26f04aec955cfaf150 - kernel-open/common/inc/nv-pci.h 95bf694a98ba78d5a19e66463b8adda631e6ce4c - kernel-open/common/inc/nvstatus.h d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - kernel-open/common/inc/nv_mig_types.h b3258444b6a2c2399f5f00c7cac5b470c41caeaa - kernel-open/common/inc/nv-hash.h 4c856c1324060dcb5a9e72e5e82c7a60f6324733 - kernel-open/common/inc/nvkms-kapi.h 44cb5bc2bc87a5c3447bcb61f2ce5aef08c07fa7 - kernel-open/common/inc/nv_uvm_interface.h 1e7eec6561b04d2d21c3515987aaa116e9401c1f - kernel-open/common/inc/nv-kernel-interface-api.h c54c62de441828282db9a4f5b35c2fa5c97d94f1 - kernel-open/common/inc/nvkms-api-types.h ade7410c1c0572dbed49b4b0d97b87245ca59115 - kernel-open/common/inc/os-interface.h 2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - kernel-open/common/inc/nv-ioctl-numa.h 995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/common/inc/nv_common_utils.h c75bfc368c6ce3fc2c1a0c5062834e90d822b365 - kernel-open/common/inc/nv-memdbg.h dfd7b82a7f2939d4c1869840059705c6b71bffe3 - kernel-open/common/inc/nv-msi.h 3b12d770f8592b94a8c7774c372e80ad08c5774c - kernel-open/common/inc/nvi2c.h 894ef9e230604572bbceabdfd5f241059d54aa10 - kernel-open/common/inc/nv_speculation_barrier.h 107d1ecb8a128044260915ea259b1e64de3defea - kernel-open/common/inc/nv-ioctl-numbers.h 19cfcbf5a3021aa9aaa0ceacbb6711e7f7a6e09e - kernel-open/common/inc/nv_uvm_user_types.h cfcd2ef5eaec92f8e4647fff02a3b7e16473cbff - kernel-open/common/inc/nv_uvm_types.h b642fb649ce2ba17f37c8aa73f61b38f99a74986 - kernel-open/common/inc/nv-retpoline.h 3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - kernel-open/common/inc/nv-gpu-info.h cda75171ca7d8bf920aab6d56ef9aadec16fd15d - kernel-open/common/inc/os/nv_memory_type.h 70b67003fda6bdb8a01fa1e41c3b0e25136a856c - kernel-open/common/inc/os/nv_memory_area.h 11b09260232a88aa1f73f109fdfab491a7b73576 - kernel-open/nvidia/nv-nano-timer.c dcf4427b83cce7737f2b784d410291bf7a9612dc - kernel-open/nvidia/nv-reg.h 0b8ff957fb14f20ba86f61e556d1ab15bf5acd74 - kernel-open/nvidia/nv-imp.c 6b09b5ef8a37f78c8e82074b06b40ef593c81807 - kernel-open/nvidia/libspdm_rsa.c b8d361216db85fe897cbced2a9600507b7708c61 - kernel-open/nvidia/libspdm_hkdf_sha.c 66e2bfc490fb77e0b72a8192b719d3dc74d25d59 - kernel-open/nvidia/nv-pat.c 26a30f2d26c2a97a6e2ee457d97d32f48b0bf25b - kernel-open/nvidia/nv-vm.c b8a770cea0629c57d8b0b3d7414d7b0f043ee8cf - kernel-open/nvidia/libspdm_ecc.c 4c183eb39251cd78d90868ec6f75ebc7a37e6644 - kernel-open/nvidia/os-usermap.c 8c30b6230439edcbec62636cc93be512bca8637f - kernel-open/nvidia/nv-usermap.c 7af675f85642229b7e7de05dcadd622550fe7ad7 - kernel-open/nvidia/nv-vtophys.c d11ab03a617b29efcf00f85e24ebce60f91cf82c - kernel-open/nvidia/nv-backlight.c ef8fd76c55625aeaa71c9b789c4cf519ef6116b2 - kernel-open/nvidia/libspdm_hkdf.c 1590794925ebd9cbc14aae8c47e0cc205a3f4b52 - kernel-open/nvidia/nv-rsync.h 934a686ba8d7b77cce2d928cb3b04f611d9f9187 - kernel-open/nvidia/libspdm_aead.c f16e6a33b5004566333fb8b99504a0fb95d51226 - kernel-open/nvidia/nv-gpio.c 8ed2c3b93eeaa52342d944e794180fd5d386688a - kernel-open/nvidia/libspdm_rsa_ext.c 2e5d18118835c19c5ca7edee9bceeae613b9d7f9 - kernel-open/nvidia/nv-procfs.c 
3e820e66f556be10c0d9728d4187e43c30658736 - kernel-open/nvidia/nv.c 65fe797fb5d4af2db67544ddb79d49ab1b7ca859 - kernel-open/nvidia/nv-dsi-parse-panel-props.c e3efae4ed920545062a2d06064df8be1a2a42135 - kernel-open/nvidia/nv-caps-imex.h 8c64e75aaaa9ac6f17aae7ed62db23eb2e5b9953 - kernel-open/nvidia/nv_uvm_interface.c 4563589496a93a2720e25807ca1be2565f03554c - kernel-open/nvidia/nv-bpmp.c aea97021d9aa023a357f009fcddc710f710ceb5e - kernel-open/nvidia/libspdm_x509.c f29e5bc1c7bd2c670780cdbb7275900a69f4d205 - kernel-open/nvidia/internal_crypt_lib.h 13dc24fb41516c777328d4db64fa39a9e2c40191 - kernel-open/nvidia/nv-modeset-interface.c 6ae527b69eebb44224b05e8cb3546757532d8a16 - kernel-open/nvidia/nv-dma.c fe204e3820d206b5b0c34a51084f39b97310305a - kernel-open/nvidia/nv-ipc-soc.c 60d6ff5becc0ddbcf4b489b9d88c1dec8ccc67be - kernel-open/nvidia/nv-platform-pm.c c1f7c81018a414b7a657431b115a1b86d3ebe3e7 - kernel-open/nvidia/os-mlock.c c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia/nv-kthread-q.c 70bece14e12b9ffc92816ee8159a4ce596579d78 - kernel-open/nvidia/os-pci.c a677049bb56fa5ebe22fe43b0c4a12acd58a6677 - kernel-open/nvidia/nv-p2p.c e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia/nv-pci-table.c 415b8f457c01417f32c998ae310b5a42dd5805cb - kernel-open/nvidia/nv-pci.c 6dfc57ac42bed97c6ff81d82e493f05b369e0b84 - kernel-open/nvidia/nvspdm_cryptlib_extensions.h bba706cfbc04b3a880b5e661066f92e765fad663 - kernel-open/nvidia/nv-caps-imex.c ed3c83f62e4ccc4b53d886eedd4b47518a361393 - kernel-open/nvidia/nv-dmabuf.c 66b7fad4d73a23153298ce777afb14d2c8be42c1 - kernel-open/nvidia/libspdm_internal_crypt_lib.c 6d4fbea733fdcd92fc6a8a5884e8bb359f9e8abd - kernel-open/nvidia/rmp2pdefines.h b71bf4426322ab59e78e2a1500509a5f4b2b71ab - kernel-open/nvidia/nv-pat.h 9a5a58bd6eb71a4c32e334a1a4e3326a17143cce - kernel-open/nvidia/os-interface.c 1a91f5e6d517856303da448bea80d167b238e41c - kernel-open/nvidia/nv-i2c.c 7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia/nv-pci-table.h c50865d3070a0c3476ce24ff1ab4cc4e3f9ea4be - kernel-open/nvidia/detect-self-hosted.h 7ae9a57b9e99fd2a3534798e52e57f7784738a53 - kernel-open/nvidia/nv-report-err.c 3b27e4eaa97bd6fa71f1a075b50af69b1ec16454 - kernel-open/nvidia/libspdm_ec.c dd9e367cba9e0672c998ec6d570be38084a365ab - kernel-open/nvidia/libspdm_rand.c d8b8077adb7fd70eb9528d421bdef98c4378b57a - kernel-open/nvidia/nv-msi.c 1cabb1e7fa825216c09f9d2f103657b0ac2dc85a - kernel-open/nvidia/nv-platform.c dd819a875c584bc469082fcf519779ea00b1d952 - kernel-open/nvidia/libspdm_aead_aes_gcm.c 74958745f83b14c04aaa60248bf5c86ceef6b5cb - kernel-open/nvidia/nv-acpi.c 4d19a1756af848d25fd2fd8cc691dcbcf0afb776 - kernel-open/nvidia/os-registry.c 80f9ac558a57c60cbf70f3ecaf73c71e60c98885 - kernel-open/nvidia/nv-rsync.c 7f5d251db1db4a179a67efea0178fbfda94f95d0 - kernel-open/nvidia/nv_gpu_ops.h 642c3a7d10b263ab9a63073f83ad843566927b58 - kernel-open/nvidia/libspdm_hmac_sha.c 7d53c2d27580d1b2cc56246d9972f3f310a3cd34 - kernel-open/nvidia/nv-clk.c 0f28ebcdb723e836c923e40642429838fa9e86dc - kernel-open/nvidia/nvidia-sources.Kbuild 99540efd2dfa6907b84e628e12370eefb0222850 - kernel-open/nvidia/nv-mmap.c 11ac7a3a3b4def7fa31a289f5f8461ad90eca06b - kernel-open/nvidia/nv-tracepoint.h a14b9115cff1e5e7491737083588a5646c8c227b - kernel-open/nvidia/nv-report-err.h 011f975d4f94f7b734efa23d3c8075321eaaf0e8 - kernel-open/nvidia/nv-memdbg.c 1ba353673c266cb47ebcd07707e8ce125353e751 - kernel-open/nvidia/nvidia.Kbuild ac976b92e83f19125d6b3f7e95d9523e430b9b09 - kernel-open/nvidia/nv-p2p.h 
9b036018501d9b8543aabe7ec35dbe33023bb3e0 - kernel-open/nvidia/nv-host1x.c 11778961efc78ef488be5387fa3de0c1b761c0d9 - kernel-open/nvidia/libspdm_sha.c 02b1936dd9a9e30141245209d79b8304b7f12eb9 - kernel-open/nvidia/nv-cray.c 2d61ad39b2356c9cfd8d57c1842e80a20272e37f - kernel-open/nvidia/nv-caps.c fc199c04b321db79ab5446574d9b994f8bfe6c24 - kernel-open/nvidia/libspdm_shash.c fa178a7209f56008e67b553a2c5ad1b2dd383aac - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rng.h 34de62da6f880ba8022299c77eddbb11d7fc68d2 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hash.h 8af43a3f0e4201aa6ff0099221a371fb1801e818 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_rsa.h cf94004b7b5729982806f7d6ef7cc6db53e3de56 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_aead.h 9a6e164ec60c2feb1eb8782e3028afbffe420927 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_mac.h 4991dfa8852edbdd1ffbd2d44f7b6ac4e1c8c752 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ec.h 7694b027d74d65561ce6cd15a8c0822e4b32b73a - kernel-open/nvidia/hal/library/cryptlib/cryptlib_sm2.h 8b84a0cc1127f39652362007e048ea568c9cf80b - kernel-open/nvidia/hal/library/cryptlib/cryptlib_ecd.h 2d7b566655ba8a05fae4ea4f6c806b75d7ebb5f3 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_cert.h 0dcb1fd3982e6307b07c917cb453cddbcd1d2f43 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_dh.h 7ff12b437215b77c920a845943e4101dcde289c4 - kernel-open/nvidia/hal/library/cryptlib/cryptlib_hkdf.h 16dd525c52448a32cc8da75d6a644d8a35efbfee - kernel-open/nvidia/library/spdm_lib_config.h 53a9acf65cad6bc4869a15d8086990365c987456 - kernel-open/nvidia/library/cryptlib.h cfbaebb1091f7b1a8d2e3c54c2301ac45ade6c40 - kernel-open/nvidia/internal/libspdm_lib_config.h 2ea094687fbee1e116cd0362cbeba7592439e0b6 - kernel-open/nvidia-drm/nvidia-drm-crtc.h bed7b5053d09473188061b0d7f6a3a65b64f72e0 - kernel-open/nvidia-drm/nvidia-drm-linux.c 0f8e4535cf97fadea23c9848483355583f492131 - kernel-open/nvidia-drm/nvidia-drm-utils.c 35034b6f174cd6a14b7d94a07f777794570959b4 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h 072e1d6a260e348dada181162949eee190321ed8 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c e86dac2985f4e61f4e2676b3290e47cdcb951c46 - kernel-open/nvidia-drm/nvidia-drm-modeset.c f00a605cac7ffc7f309e3952c5d4cea7cbfc0b7e - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h 99642b76e9a84b5a1d2e2f4a8c7fb7bcd77a44fd - kernel-open/nvidia-drm/nvidia-drm.h 763833186eabf1a0501434426c18161febf624d4 - kernel-open/nvidia-drm/nvidia-drm-fb.h 4bada3ff7bfee8b7e222fc4cafb2ac97c67d7898 - kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h 99a2e922a448b4d76318ec151378c8bbf5971595 - kernel-open/nvidia-drm/nvidia-drm-helper.c ae6efc1bbec8a5e948b7244f4801f0b4b398f203 - kernel-open/nvidia-drm/nvidia-drm.c 94c28482252c983fd97532634ffafea0bf77337a - kernel-open/nvidia-drm/nvidia-drm-ioctl.h a4f77f8ce94f63f3ca2a970c1935d8da48ab5ccc - kernel-open/nvidia-drm/nvidia-drm-format.c b78e4f40234f908e722f172485e4466d80b7b501 - kernel-open/nvidia-drm/nvidia-drm-drv.h 4154c5562cebd2747bd15fb302c19cb0cefe1c9c - kernel-open/nvidia-drm/nvidia-drm-connector.h c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-drm/nv-kthread-q.c e4d12f027cb5f74124da71bbbc23bcb33651834a - kernel-open/nvidia-drm/nv-pci-table.c 47110750cf788e7d9ddb5db85be3658ac660a109 - kernel-open/nvidia-drm/nvidia-drm-fence.h 73a1acab50e65c468cb71b65238a051bc306ae70 - kernel-open/nvidia-drm/nvidia-drm-encoder.h aa388c0d44060b8586967240927306006531cdb7 - kernel-open/nvidia-drm/nvidia-drm-helper.h 
d0b4f4383a7d29be40dd22e36faa96dae12d2364 - kernel-open/nvidia-drm/nvidia-drm-os-interface.h 63a2fec1f2c425e084bdc07ff05bda62ed6b6ff1 - kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c a46422076a6a3e439349fbda4fc46e4add29b8e5 - kernel-open/nvidia-drm/nvidia-drm-drv.c 19031f2eaaaeb0fa1da61681fa6048c3e303848b - kernel-open/nvidia-drm/nvidia-drm-gem.c 71ea2d5b02bf8fb3e8cf6b7c84686e2edbc244d0 - kernel-open/nvidia-drm/nvidia-drm-encoder.c 7d409e3f0255d17457bffbf318e2f9ea160680a5 - kernel-open/nvidia-drm/nv-pci-table.h 9f57b8724205e03ca66b32fe710cd36b82932528 - kernel-open/nvidia-drm/nvidia-drm-conftest.h 6e9838b169beffe149ba12625acb496504d36d50 - kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c d2525a36b7aec71982df80a89b861f220312103d - kernel-open/nvidia-drm/nvidia-dma-resv-helper.h a505f0aa98ebcf438307f6bacf9bf5a5be189839 - kernel-open/nvidia-drm/nvidia-drm-connector.c d5518597469dc874ee7e264b9400db51af2fcd44 - kernel-open/nvidia-drm/nvidia-drm-format.h 437d87e7e4bd34ae3c67b27c2faaa394575acf70 - kernel-open/nvidia-drm/nvidia-drm-priv.h 88b2035ddbba8c7f455209e61256b4e7b09c11dd - kernel-open/nvidia-drm/nvidia-drm-fence.c eff6a0b72274c8824b7a79e9aee261da3a6fb4f1 - kernel-open/nvidia-drm/nvidia-drm-gem.h 6528efa1f8061678b8543c5c0be8761cab860858 - kernel-open/nvidia-drm/nvidia-drm-modeset.h 46a41b0b3470190abcdc57a739238a9cd773812b - kernel-open/nvidia-drm/nvidia-drm.Kbuild 995d8447f8539bd736cc09d62983ae8ebc7e3436 - kernel-open/nvidia-drm/nv_common_utils.h 40b5613d1fbbe6b74bff67a5d07974ad321f75f0 - kernel-open/nvidia-drm/nvidia-drm-utils.h d924c494620760887546f428f87387d8ed5b99a6 - kernel-open/nvidia-drm/nvidia-drm-fb.c 5eb8385042f3efa5c2e14d168cdb40b211467552 - kernel-open/nvidia-drm/nvidia-drm-crtc.c 62a9b9b30fd7417d9ab085b2bfc731aadd9826f9 - kernel-open/nvidia-drm/nvidia-drm-os-interface.c ca86fee8bd52e6c84e376199c5f3890078bc2031 - kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h 04ea084a5c5d496cc43103d1997053246a2fa94c - kernel-open/nvidia-modeset/nvidia-modeset-linux.c b2a5ddfd8dcb3000b9d102bd55b5b560730e81d5 - kernel-open/nvidia-modeset/nvkms.h c762aa186dc72ed0b9183492f9bd187c301d33d3 - kernel-open/nvidia-modeset/nv-kthread-q.c da6fd16e29300170aba8a652ea6296241f66243b - kernel-open/nvidia-modeset/nvidia-modeset.Kbuild 2ea1436104463c5e3d177e8574c3b4298976d37e - kernel-open/nvidia-modeset/nvkms-ioctl.h 13d4f9648118dd25b790be0d8d72ebaa12cc8d0e - src/common/sdk/nvidia/inc/rs_access.h 579be4859587206460d8729804aab19180fb69bb - src/common/sdk/nvidia/inc/nvtypes.h 993f17e3094243623f793ae16bd84b5fa3f335ec - src/common/sdk/nvidia/inc/g_finn_rm_api.h a54d77d45f9b0c5ae3fa8b59d2117145260800b6 - src/common/sdk/nvidia/inc/cc_drv.h b249abc0a7d0c9889008e98cb2f8515a9d310b85 - src/common/sdk/nvidia/inc/nvgputypes.h 78a4b6b19a38de41527ef8b290754deca5906817 - src/common/sdk/nvidia/inc/nvcd.h ede1f77acb43e28391bceac058e00a7a8d799b0d - src/common/sdk/nvidia/inc/nvmisc.h 46966ed7fc8d85931b49b12683c42666181f33f6 - src/common/sdk/nvidia/inc/nvimpshared.h befb2c0bf0a31b61be5469575ce3c73a9204f4e9 - src/common/sdk/nvidia/inc/nv_stdarg.h f5a682339a89d2b119b43e5b9263dd67346ed3bc - src/common/sdk/nvidia/inc/cpuopsys.h cf1de27d5bcbd0adbe3c3b64466193b7d9094c71 - src/common/sdk/nvidia/inc/nverror.h 4015c4557ea0790a2bdf5695832c89e31d75aee9 - src/common/sdk/nvidia/inc/nvlimits.h 7c7888550b12eeb98128ea9ac771b897327f538e - src/common/sdk/nvidia/inc/nv-hypervisor.h b8700a911ac85770bf25d70b9692308af63966bd - src/common/sdk/nvidia/inc/nvstatuscodes.h 95bf694a98ba78d5a19e66463b8adda631e6ce4c - 
src/common/sdk/nvidia/inc/nvstatus.h a506a41b8dcf657fb39a740ffc1dfd83835d6c89 - src/common/sdk/nvidia/inc/nvcfg_sdk.h 1e7eec6561b04d2d21c3515987aaa116e9401c1f - src/common/sdk/nvidia/inc/nv-kernel-interface-api.h af0bc90b3ad4767de53b8ff91e246fdab0146e8b - src/common/sdk/nvidia/inc/nvsecurityinfo.h 5cec5038e1f4a395a08b765c8361a9560f3312b7 - src/common/sdk/nvidia/inc/nvdisptypes.h c8b96af9d498f87cb9acde064648f9e84d789055 - src/common/sdk/nvidia/inc/nv_vgpu_types.h 3b12d770f8592b94a8c7774c372e80ad08c5774c - src/common/sdk/nvidia/inc/nvi2c.h bbf6c09ef9bb10ab63d337bf011872f9073c3e5b - src/common/sdk/nvidia/inc/nvos.h 9bca638f5832d831880f090c583fac6fc8cf6ee6 - src/common/sdk/nvidia/inc/dpringbuffertypes.h 7de14a0c3cc8460a9c41e1ee32fda5409c5b9988 - src/common/sdk/nvidia/inc/mmu_fmt_types.h 774318ced0fdcb199e99cf0fee9688259dd01a51 - src/common/sdk/nvidia/inc/nvfixedtypes.h ed51b6e2d454af3da36f9c5f4a8a7958d2c5f156 - src/common/sdk/nvidia/inc/alloc/alloc_channel.h ffe618524466cbbff64de55d88fd987e198bb8c9 - src/common/sdk/nvidia/inc/class/cl9271.h cef74c734fc7d2f32ff74095c59212d9e1d4cafc - src/common/sdk/nvidia/inc/class/cl84a0.h 9f8a45cb986e3ad2bd4a8900469fe5f8b0c9463a - src/common/sdk/nvidia/inc/class/cl9870.h a6bb32861fa3f93ccb16490f0f2751a1ef333eed - src/common/sdk/nvidia/inc/class/cl0101.h e6818f1728a66a70080e87dac15a6f92dd875b4e - src/common/sdk/nvidia/inc/class/cl927d.h 522682a17bacd5c1d6081c0020d094ee3d5c4a30 - src/common/sdk/nvidia/inc/class/clcb97.h 89d4eeb421fc2be3b9717e333e9ff67bfffa24e8 - src/common/sdk/nvidia/inc/class/cl2080.h f558fddfdc088b86a1b479542b8e782e42a5bdce - src/common/sdk/nvidia/inc/class/clc37a.h d301edef2d1dd42382670e5a6ceef0d8caf67d28 - src/common/sdk/nvidia/inc/class/cl90cd.h 1dfae8f11f8e92908f59a1c9493e84ce40d53b90 - src/common/sdk/nvidia/inc/class/cl0070.h 95d99f0805c8451f0f221483b3618e4dbd1e1dd8 - src/common/sdk/nvidia/inc/class/cl90f1.h 99a34eee22f584d5dfb49c3018a8cb9a7b1035ed - src/common/sdk/nvidia/inc/class/cl5070_notification.h c4f090f0dae5bdebf28c514c1b5a9bd8606aa56c - src/common/sdk/nvidia/inc/class/cl9097.h 4b77798281f3754a80961308d44a70b1a717283b - src/common/sdk/nvidia/inc/class/clc46f.h bd2a88f8dbc64add00ad366aa3e76d116cb090b3 - src/common/sdk/nvidia/inc/class/cl0073.h e587a693bc1cee68983a7039ddbf16a3d3461d64 - src/common/sdk/nvidia/inc/class/cl9471.h ddbffcce44afa7c07924fd64a608f7f3fe608ccc - src/common/sdk/nvidia/inc/class/cl0071.h 74c75472658eea77d031bf3979dd7fe695b4293f - src/common/sdk/nvidia/inc/class/cl0092_callback.h fd16daebcd23a680b988dde4ae99625434dcb8fa - src/common/sdk/nvidia/inc/class/cl0000.h c2d8bb02052e80cd0d11695e734f5e05ab7faeb5 - src/common/sdk/nvidia/inc/class/cl907dswspare.h 5ca1d01dab6b9e814160ddce868d00aa9a1ead58 - src/common/sdk/nvidia/inc/class/clc873.h 7c7406d40a09372dcae2aaf3fcad225c3dd2cf3f - src/common/sdk/nvidia/inc/class/cl9010_callback.h 2240664ad950c9c2e64b6f4d18e05349bc91443c - src/common/sdk/nvidia/inc/class/clc573.h 593384ce8938ceeec46c782d6869eda3c7b8c274 - src/common/sdk/nvidia/inc/class/cl900e.h 101da471fe4e167815425793491e43193e407d9a - src/common/sdk/nvidia/inc/class/clc397.h dec74b9cf8062f1a0a8bbeca58b4f98722fd94b0 - src/common/sdk/nvidia/inc/class/cl0076.h 46f74fc51a7ec532330e966cad032782e80808b8 - src/common/sdk/nvidia/inc/class/clcc7b.h 053e3c0de24348d3f7e7fe9cbd1743f46be7a978 - src/common/sdk/nvidia/inc/class/cl0004.h 71e34a03bcfa70edfbec4dbdeade82a932057938 - src/common/sdk/nvidia/inc/class/clc637.h 447fe99b23c5dbe3d2a7601e8228a1a1831c6705 - src/common/sdk/nvidia/inc/class/clcc70.h 
89ed6dd37fca994e18e03a5410d865b88e1ff776 - src/common/sdk/nvidia/inc/class/clc87e.h 03d873c3a0e0376440f23171640d9c517f7a34e9 - src/common/sdk/nvidia/inc/class/cl902d.h 78259dc2a70da76ef222ac2dc460fe3caa32457a - src/common/sdk/nvidia/inc/class/clc37e.h b7a5b31a8c3606aa98ba823e37e21520b55ba95c - src/common/sdk/nvidia/inc/class/cl402c.h 5ee1adc8d952212b37211c6f4f677ba672f5117c - src/common/sdk/nvidia/inc/class/clcc71.h bd12f7cdc3a01668b9c486dc6456f9263dd459ea - src/common/sdk/nvidia/inc/class/clc57b.h 4b2f2194a1655cc6ae707866f130bbe357d0c21f - src/common/sdk/nvidia/inc/class/clb097tex.h 5409e5af182ac18ef8d13380bdfe7cf2e83d37d7 - src/common/sdk/nvidia/inc/class/clc37b.h aeb4cbab8d1d0fbd0a5747fa36d6f56c00234b2d - src/common/sdk/nvidia/inc/class/clc097tex.h 36fd6906e2688dad2e7ab648be7e070b9eb6f11d - src/common/sdk/nvidia/inc/class/clc971.h 513c505274565fa25c5a80f88a7d361ffbcb08c3 - src/common/sdk/nvidia/inc/class/cl0005.h 53e6252cd85a60698c49a721f4e41da1cb14e5bd - src/common/sdk/nvidia/inc/class/clc97dswspare.h 645adeb829dbcf315bf67ff8387e7a5d982d7b6e - src/common/sdk/nvidia/inc/class/cl00de.h 0f91db32d9e346b4d9f3762c9e59a8f8e5fd0903 - src/common/sdk/nvidia/inc/class/clcc7d.h a24c2a943c7ceceb8d015f5cd02148f8c4e7c23d - src/common/sdk/nvidia/inc/class/clb097.h 691bb932ea3f60d2b9ad3e4d7fa53ab1a2a5e6c5 - src/common/sdk/nvidia/inc/class/clc870.h 758e2fb8b5d89079f03be09d74964e9246cb180c - src/common/sdk/nvidia/inc/class/clc797.h f4af32374be4d05a2e55c97053a4f0d1f4b85154 - src/common/sdk/nvidia/inc/class/cl0000_notification.h 1e578eb23dacca047e0b342cce3024b3134f8de9 - src/common/sdk/nvidia/inc/class/clc7b5.h 941a031920c0b3bb16473a6a3d4ba8c52c1259d7 - src/common/sdk/nvidia/inc/class/cl917e.h b23cdfb66f40c6d9a903f602b8ff4526063b5a2d - src/common/sdk/nvidia/inc/class/clc097.h 0de3548dde4e076cbd0446330b2d5ae4862c1501 - src/common/sdk/nvidia/inc/class/clc973.h ddb996ff90b80c0f58729b9ac89fa6d2d3950e49 - src/common/sdk/nvidia/inc/class/cla16f.h cb610aaae807d182b4a2ee46b9b43ebfa4a49a08 - src/common/sdk/nvidia/inc/class/clc57e.h 9e1d2f90d77e23f1d2163a8f8d8d747058e21947 - src/common/sdk/nvidia/inc/class/cl9010.h 7a14243de2b228f086810f968a1712627f1333fd - src/common/sdk/nvidia/inc/class/clc36f.h 7c8e1f1055f9522cfb2935ea0aae612ef172c26e - src/common/sdk/nvidia/inc/class/clc370_notification.h 64ad2ab88e2006bcdace06e7109981496c39f265 - src/common/sdk/nvidia/inc/class/clc87d.h 36c6162356ac39346c8900b1e0074e4b614d4b5a - src/common/sdk/nvidia/inc/class/clc370.h 5df0ce4eb733554e963eb3c7938396f58f2dd4d5 - src/common/sdk/nvidia/inc/class/cl2081.h a4d82d12346918edd0a7564a5c6cbfe849532b7f - src/common/sdk/nvidia/inc/class/clca70.h 159b78a13e43a2afe6c17714a6f8619675480346 - src/common/sdk/nvidia/inc/class/clc86f.h 6ddba2e93c046ae04f48685c73f8f2d9fe74a398 - src/common/sdk/nvidia/inc/class/clc67a.h 83c6378ef27c8b640895a123801d27e6c4fd3754 - src/common/sdk/nvidia/inc/class/clc671.h 7f75433a769a020d9f36996c855c8ce6ab39dd83 - src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h 31ac68401e642baf44effb681d42374f42cf86b1 - src/common/sdk/nvidia/inc/class/cl00c3.h 95ca0b08eed54d1c6dd76fdf9cf4715007df1b20 - src/common/sdk/nvidia/inc/class/cl0020.h 20d5608c2d6e55efd6d1756a00739f7a05d3a2b3 - src/common/sdk/nvidia/inc/class/clc361.h 9797f4758d534181eeaa6bc88d576de43ba56045 - src/common/sdk/nvidia/inc/class/clc574.h a39d75d3e479aebaf3849415e156c3cfe427298a - src/common/sdk/nvidia/inc/class/clc771.h eac86d7180236683b86f980f89ec7ebfe6c85791 - src/common/sdk/nvidia/inc/class/cl957d.h f7a2fea4725d59e95294c397ede001504b777b0d - 
src/common/sdk/nvidia/inc/class/clc697.h f3f33f70ec85c983acec8862ccaabf5b186de2bb - src/common/sdk/nvidia/inc/class/cl9270.h 8b94512c9746c6976c4efeee0291bf44bb5e0152 - src/common/sdk/nvidia/inc/class/clcc73.h 60d0c7923699599a5a4732decfbcb89e1d77b69e - src/common/sdk/nvidia/inc/class/cl9770.h e0c9a155f829c158c02c21b49c083168f8b00cbe - src/common/sdk/nvidia/inc/class/clc37dswspare.h 499bc681107a2b7ad7af3d2211b582b8fb9d9761 - src/common/sdk/nvidia/inc/class/clcc7a.h e1bfd0c78f397e7c924c9521f87da8286bebe3f1 - src/common/sdk/nvidia/inc/class/cl84a0_deprecated.h 2f291dc867e71f625c59f72787b9fb391a16d0e6 - src/common/sdk/nvidia/inc/class/clc638.h 8d2dcc086f892dd58270c9e53e747513ed4b2f93 - src/common/sdk/nvidia/inc/class/clb06f.h 3d262347ab41547d9ccc28a892d24c83c6b1158e - src/common/sdk/nvidia/inc/class/cla06f.h bae36cac0a8d83003ded2305409192995d264d04 - src/common/sdk/nvidia/inc/class/cl0001.h ba8f5899df4287b8440bcb9c8e09e10db73ebf12 - src/common/sdk/nvidia/inc/class/clc97a.h 7bfcd7cf1735b2a54839e8a734e2227060ebf570 - src/common/sdk/nvidia/inc/class/clc197.h e231c552afb3a78da7341ee49bf36940f1f65202 - src/common/sdk/nvidia/inc/class/clc77d.h 821396a58944ba4620f43cf6ee833b7a04d67193 - src/common/sdk/nvidia/inc/class/clc970.h 1f1879fcddf3c3f1f6c44df0e51822ad1bfa1aae - src/common/sdk/nvidia/inc/class/cl9171.h a23967cf3b15eefe0cc37fef5d03dfc716770d85 - src/common/sdk/nvidia/inc/class/clc372sw.h 02ff42b6686954e4571b8a318575372239db623b - src/common/sdk/nvidia/inc/class/cl30f1_notification.h 4be055f206ef1049e8a5b824f9f4830eba0e224c - src/common/sdk/nvidia/inc/class/cla26f.h ef173136a93cdd2e02ec82d7db05dc223b93c0e1 - src/common/sdk/nvidia/inc/class/clc770.h a3e011723b5863277a453bfcfb59ce967cee0673 - src/common/sdk/nvidia/inc/class/clc670.h f33b9fdad6ceb534530fecfd16b40a71f5f5cfdc - src/common/sdk/nvidia/inc/class/clc56f.h 02906b5ba8aab0736a38fd1f6d7b4f6026a5185b - src/common/sdk/nvidia/inc/class/clc57esw.h aa6387d7ce55a88789c5731e89dedde57115131c - src/common/sdk/nvidia/inc/class/clc97b.h 86ab048c67a075349622c597fa9c4f2a9a3d8635 - src/common/sdk/nvidia/inc/class/cl9571.h 9b2d08d7a37beea802642f807d40413c7f9a8212 - src/common/sdk/nvidia/inc/class/clc37d.h bd9f406625e6c0cce816a5ddfb9078723e7f7fb5 - src/common/sdk/nvidia/inc/class/clb0b5sw.h ab27db8414f1400a3f4d9011e83ac49628b4fe91 - src/common/sdk/nvidia/inc/class/cl987d.h 2614a83d383b540f23ef721ec49af1dfde629098 - src/common/sdk/nvidia/inc/class/cl0080.h 9db39be032023bff165cd9d36bee2466617015a5 - src/common/sdk/nvidia/inc/class/cl0002.h 094bec72bfa8c618edc139bc353b20433f1c1da2 - src/common/sdk/nvidia/inc/class/cl2080_notification.h e72a7871d872b2eb823cc67c0a7d4cafb3d0ca18 - src/common/sdk/nvidia/inc/class/cl90ec.h 0ad3b3e00dc83a0487bd96abd5fe467213aa51ad - src/common/sdk/nvidia/inc/class/clc597.h 869e41c3ba08d704fcf00541075986de43d6b090 - src/common/sdk/nvidia/inc/class/cl917b.h b685769b5f3fed613227498866d06cc3c1caca28 - src/common/sdk/nvidia/inc/class/cl2082.h 4c0d054bd0d9935d8d2cedba3f5e910d6b6f8ed3 - src/common/sdk/nvidia/inc/class/clc997.h 1697a9ed528d633a1e78c0071868d7dff899af26 - src/common/sdk/nvidia/inc/class/clc57a.h 8e85d29d4006dbd3a913fcc088be5e8c87bbdabb - src/common/sdk/nvidia/inc/class/cl0100.h 15d1f928a9b3f36065e377e29367577ae92ab065 - src/common/sdk/nvidia/inc/class/cl0080_notification.h e3bd2cacd357e411bc1b6b7d7660ffa97c3a7ee3 - src/common/sdk/nvidia/inc/class/clb197.h 16f9950a48c4e670b939a89724b547c5be9938bf - src/common/sdk/nvidia/inc/class/clc570.h 060722ac6a529a379375bb399785cbf2380db4fd - src/common/sdk/nvidia/inc/class/clc373.h 
bd910ff84b9920af83e706a8ab37c68157a372c8 - src/common/sdk/nvidia/inc/class/clc97e.h b71d1f698a3e3c4ac9db1f5824db983cf136981a - src/common/sdk/nvidia/inc/class/cl9170.h 2a031d85b85c4b1e5b278f6010ca8f33b2192de1 - src/common/sdk/nvidia/inc/class/cl90e7.h 9ceb4ec8538818c8b1dcc7ffe885584b8e0f435e - src/common/sdk/nvidia/inc/class/cla097.h a9503a5558b08071f35b11df9a917310947c378b - src/common/sdk/nvidia/inc/class/cl00da.h d8000ab8ef59e64d17b4089c43953ca69b7f605f - src/common/sdk/nvidia/inc/class/clc67e.h 6400b9ad3460dafe00424e3c1b1b7a05ab865a63 - src/common/sdk/nvidia/inc/class/cl50a0.h 7032fd79731907df00a2fe0bbf6c0f4ce87f021d - src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h b11e7b13106fd6656d1b8268ffc15700fba58628 - src/common/sdk/nvidia/inc/class/clc371.h ff47d8a4b4bdb3b9cd04ddb7666005ac7fcf2231 - src/common/sdk/nvidia/inc/class/cl003e.h 0285aed652c6aedd392092cdf2c7b28fde13a263 - src/common/sdk/nvidia/inc/class/cl00fc.h 81b4e4432da8412c119e795662819cfe7558711f - src/common/sdk/nvidia/inc/class/cl917a.h 38265d86eb7c771d2d3fc5102d53e6a170a7f560 - src/common/sdk/nvidia/inc/class/cl0041.h 848c89981de73d681615266e4e983b74c2ef418f - src/common/sdk/nvidia/inc/class/cla06fsubch.h 2d76476dba432ffc1292d2d5dd2a84ff3a359568 - src/common/sdk/nvidia/inc/class/cl0092.h b46b2cfcf72fc2f9722bd42cea8daaeeda861471 - src/common/sdk/nvidia/inc/class/clc871.h 022e8405220e482f83629dd482efee81cc49f665 - src/common/sdk/nvidia/inc/class/clc77f.h fe7484d17bc643ad61faabee5419ddc81cf9bfd6 - src/common/sdk/nvidia/inc/class/cl9570.h bb79bbd1b0a37283802bc59f184abe0f9ced08a5 - src/common/sdk/nvidia/inc/class/cl0040.h 6249715d9876f5825ad62f563bf070e93710a2ad - src/common/sdk/nvidia/inc/class/clc67d.h b1133e9abe15cf7b22c04d9627afa2027e781b81 - src/common/sdk/nvidia/inc/class/cl917c.h 7ef21c4f4fd4032c8f25f8fb33669e692a26e700 - src/common/sdk/nvidia/inc/class/clcb97tex.h 73b706e4916f4c70302387c88c8e14e7b2c1f4e6 - src/common/sdk/nvidia/inc/class/clc67b.h c40fd87fa6293d483b5bf510e2e331143ded9fa4 - src/common/sdk/nvidia/inc/class/cl9470.h 20894d974d1f8f993c290463f1c97c71fd2e40b1 - src/common/sdk/nvidia/inc/class/cl30f1.h 9f7f04825f3f218cc0c4610938935e2f0a73e13b - src/common/sdk/nvidia/inc/class/clc97d.h 04ab1761d913030cb7485149ecd365f2f9c0f7da - src/common/sdk/nvidia/inc/class/cl0005_notification.h da8d312d2fdc6012e354df4fa71ed62ae4aac369 - src/common/sdk/nvidia/inc/class/cl927c.h 158c98c8721d558ab64a025e6fdd04ce7a16ba9e - src/common/sdk/nvidia/inc/class/cl947d.h 5416c871e8d50a4e76cbad446030dbedbe1644fd - src/common/sdk/nvidia/inc/class/cl00f2.h 0b35244321b1f2f6647f8389f6fa7254c34790e2 - src/common/sdk/nvidia/inc/class/cl90cdtrace.h 39161706917567f434a6fff736b22f3358923e68 - src/common/sdk/nvidia/inc/class/clc06f.h bc3674f2384cb3695ce5f035ed16e9c39bba4d1b - src/common/sdk/nvidia/inc/class/cl00fe.h dd4f75c438d19c27e52f25b36fc8ded1ce02133c - src/common/sdk/nvidia/inc/class/cl917cswspare.h 435a34753d445eb9711c7132d70bd26df2b8bdab - src/common/sdk/nvidia/inc/class/cl917d.h b31019107ada7b0fb8247c09d93b95a630821fa8 - src/common/sdk/nvidia/inc/class/clcc7e.h 31939808cd46382b1c63bc1e0bd4af953302773f - src/common/sdk/nvidia/inc/class/cl977d.h 83427e3172c64c3b9ef393205ccc3b961ec65190 - src/common/sdk/nvidia/inc/class/cl5070.h db8dd50ad3e64fe0472d82c0940908d5da5e0321 - src/common/sdk/nvidia/inc/class/cla0b5.h 28867d69a6ceac83da53a11a5e1ef87d9476f0be - src/common/sdk/nvidia/inc/class/clc57d.h 8b07d7aca050be883fdc0d6f4b19eac0b0b6c796 - src/common/sdk/nvidia/inc/class/clc673.h c116d91177c6cbfb8c25e7f35bb49a8d5a51816c - 
src/common/sdk/nvidia/inc/class/cl008f.h 4fc2133935b8e560c9a1048bc0b1f1c2f0a4464c - src/common/sdk/nvidia/inc/class/cl00c1.h 5a6098f821e8faa19345313477726431f9271cde - src/common/sdk/nvidia/inc/class/clc661.h 6db83e33cb3432f34d4b55c3de222eaf793a90f0 - src/common/sdk/nvidia/inc/class/cl00b1.h 5b573deb4d68ccb67d9cccc11b28203c5db3d2f7 - src/common/sdk/nvidia/inc/ctrl/ctrl0002.h 88947927d79e15df8cbf77a59ac883a29e970413 - src/common/sdk/nvidia/inc/ctrl/ctrlc638.h 625af1df5c9453bd35a9e873ee5c77e73d5fd195 - src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h ade4a731f59c7cd16b4a60d318a19147b9918bb9 - src/common/sdk/nvidia/inc/ctrl/ctrl0004.h 90843f8173a341deb7f1466cd69a17114c6b9e4f - src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h a305225ceda0a39c76ed61b819a1f4165f5644f5 - src/common/sdk/nvidia/inc/ctrl/ctrl00fe.h be3c9e2de8b8d33fe04389b224fa6ad95ecd089b - src/common/sdk/nvidia/inc/ctrl/ctrla06f.h c3e3213f548f93592f7d3dfd76e63a2102d800ec - src/common/sdk/nvidia/inc/ctrl/ctrl0076.h d7415e78725899f9d10fa2d5f03f3d62cef42f26 - src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h 9e343f73f46238075cef766cad499533559dfa28 - src/common/sdk/nvidia/inc/ctrl/ctrl00da.h f7601ce8c7c2d7a1143bff5280e3e5d9b5c4c147 - src/common/sdk/nvidia/inc/ctrl/ctrl906f.h 97ac039e796faca6c9f78e16020fe96225b33492 - src/common/sdk/nvidia/inc/ctrl/ctrlc637.h fe7ce28fe76174a6de68236b44ea565ba2ea687b - src/common/sdk/nvidia/inc/ctrl/ctrl00de.h 3ba6904c69aa7710c4561d5643b18fc41e141d4e - src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h b178067ba5f93e7fafb4c2ee0f5032acf9bc55d7 - src/common/sdk/nvidia/inc/ctrl/ctrla081.h 58a5d3a55b2d9b29d4f1b1e7b5d4d02ae6885e30 - src/common/sdk/nvidia/inc/ctrl/ctrl003e.h 16a24249210637987d17af6069ae5168404743ee - src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h 58f8e48d5851cc10e3c5fd3655d7948b9f327ca0 - src/common/sdk/nvidia/inc/ctrl/ctrl2080.h b86c4d68c5758f9813f00cc562110c72ef602da7 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7.h c042a366bc755def9e4132e2768c1675871dbe65 - src/common/sdk/nvidia/inc/ctrl/ctrl0041.h c8b2e0e64bb3cf3c562dee5fa7913035f82d8247 - src/common/sdk/nvidia/inc/ctrl/ctrl402c.h 352825959d98fe9b47a474cfdd154d380c80d24e - src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h 9d908bb15aecc9d8094e1b6c13301efba6032079 - src/common/sdk/nvidia/inc/ctrl/ctrl0080.h 3fcf5dbb82508d88a040981a7ab21eac1466bb2b - src/common/sdk/nvidia/inc/ctrl/ctrl0073.h bfee287b190fd698735c5660592741ba5c25a8ea - src/common/sdk/nvidia/inc/ctrl/ctrl0020.h 2e65ccd2704919780a152c69f53400a0dc5e6e41 - src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h 4fb7753f3502303314d9e8f853ee3b752f7e9317 - src/common/sdk/nvidia/inc/ctrl/ctrl0100.h 8764e07e9d348163db4eb41b0c3cf32c76172c0d - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h 5782a19aeaf9695c13940cf4532e41523a8460e3 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h f21c15122509a8843e676a2bd5e799c58cd96379 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h 326b61039197db58d8369256f6d7dc9764aea421 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h e7452921bdbd036ca3a37c60c49829c05e95c2d5 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h 5f3b68d39f14137d33f239408a6a13543f4ac966 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h d08ef822e97ee56984618d52ed3ed55ee395eadb - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h 8fcc64b22b0f6cde40d5ecd23e5e2444277a5999 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h 70d65d4f923ec0efd8931433ae50930d12f78a07 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h a33a1c1173962183793d84276e46c61d27ca867e - 
src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h 1b594c39d1439c3d1ecc24c4325b2ea8c2724548 - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h 0146d2b3ecec8760e76dacd8ce6bb75c343c6cac - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h 11abea0cdf485863196de56169451980ee6c016a - src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h 4f0ccb0667bd3e3070e40f3f83bede7849bc78e4 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h 08dda80bac8d3418ad08e291012cf315dc9e5805 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h 28b06c8f8152dce2b2e684a4ba84acd25a8b8c26 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h add9e3867e3dbd2c11bed36604680af4aaa0f164 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h 2ffb93d092df65570b074ad97f0bb436a1c66dff - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h 79fd7ed84cb238ea90ea3691f40ea7140034d3dc - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h 2ea79d79223b06633fb7f541ebbe5a300ba3885f - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h 44c9aa512eb0b9b92cace9e674299f2a9227c37c - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h a3328cf6633f9b04258eff05ce30e66cc6930310 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h a427892e601a4ca4f88cc5778ff78895324f3728 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h 92ff82d1045933baa79958a9f6efd451b0123e95 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h 7ef9e10955708592e92e127eb3fb372adff44818 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h 3c1bd0db339456c335acd50a75ace42cb8bbe6f8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h be10e3f4a9dd2f2ab35305ee0af628ef339b25a7 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h db66195c8e7252c5f424953275cbb7be90a17ba8 - src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h c74ac448c3382d92e662804b56e73edd748e2678 - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h 7318f74523bb6a015e561dba1a06b47a278d856d - src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h 702d9cb471a344a25911449cc580f69f7155ab1c - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h 3f747a4fc98291329e0245a971248cf2c28a1b60 - src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h 19c7eff334c591c803dcd93fc0818798c281df48 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h c7dcbc0ae7454df6523c6deb5f07a70dc2fdbc15 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h 882b13d54585a6fc5534d12b9cdcec29c8cde337 - src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fucodecoverage.h 76fb63a6782ff1236303fdd7bf2698f42965a266 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7base.h 00d2655f569187190bd117bdf37fe4ddd5e92320 - src/common/sdk/nvidia/inc/ctrl/ctrl90e7/ctrl90e7bbx.h 8064c31eb1e447561c415f9835aecac97d5f3517 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h 713aa1291aef3f79304ad35c5143a7576f242f63 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h bb7955387f6a286927e7922019676ca0aba713e6 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h 35367f08b96510a5312653b5197d6bb34c0a3d00 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h a0cf9dfb520e3320cd9c154c01cd2f1a7bbbd864 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h c2066c407f81538047c435fffca2705c28107663 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h d727b328e995a7d969ec036f2d5b52264568a7bf - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h 52f251090780737f14eb993150f3ae73be303921 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h 
77eb4fab61225663a3f49b868c983d5d532ca184 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h 6ca26c7149455e43f32e8b83b74f4a34a24a2d29 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h 134d43961ea1d42fc36d75685fdd7944f92b0b53 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h 022feef64678b2f71ab70dc67d5d604054990957 - src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h 2a00952f0f3988c5425fec957a19d926ae75ba28 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h 79b38bbe679d397b48b78266aa5f50459fe5b5bc - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h 514d012dbfd9e056b7f729bccb213fa9193d433e - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370or.h 6ef99465758f71f420ac17765380cc37dbcac68a - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h 5f70c2eb6a144bc4d7ca8be63fa46391909e8201 - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h f4ed3ccff4720114d1aaed82484ed70cf07626db - src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h ba3b73356bf0d1409ecfd963b623c50ec83f1813 - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h bb0a5ff091ef854b19e7da0043b7b7b10232c3de - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h 1f25c9f215991f34fee94dafac5fad0e7460db1c - src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h ddeb0df51d5f662948f9098a5d85b40c8ab6504b - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h e3fb93f0ff3469ec76cecdc6f0bf1c296551a2b1 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h a138379dd76c468072f1862b8fc6ae79ee876b4e - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h ee99443c1bd3441df474566622486b04c4502ac0 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h 44e1b06211eee31e42e81879f5220f26ddec70ae - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h ff789d585a7f001b8bd32e07a268c635d39b17ab - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h 03f54e22b39ad5cf682eada7147c6c155f16b385 - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h e8d883de767aa995a374d8da56b5c9da8787cb1d - src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h 8fdb493bda6119025c1d00f289a6394e7dcd1b53 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h cfa32c37f373eeef53aedc3f4dffff1634c122e8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h 41a0a14e04527fa2c349d2895bb41affd154c999 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h ecd312fabb249a25655e151cee3615c5ab61ffa7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h c30b5995d353e68623b32fea398f461351e3b8f1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h aa0f685b94bdae99a58aa1a45735b0593a2e6f5a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h aa86ffd04a55436ecacbedb1626f6187bbddedf7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h 3423a69bba50e1405b5a7d631bfff1f6f0a1673f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h 1990d0c4fa84c6d078282d4d7d0624ccb0325ce7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h 146263409e5304f661da349b56761ab7403144bd - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h 8b622186edb156e980d02bd59a71c01923d1aa23 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h 70dc706ea4ee7b143a716aae9e4f8c0bcef6c249 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h 0a156fc54f45386fabd06ef5ec11ba3a816fbfb7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h c157e185d3c64ee9476ddc75881bfc5a5b8b997f - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h 785d96360f86bc53eb428fd3f4fbeda395400c8a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h 
b8e8c5ccab01d7997d1fd5579a690cb3279a8ab3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h b2eecbca32d87b939858bf0b22f93c06b49b3a04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h 24a891a02e1a882769d4da3454e4dfcf42b1ea6c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h 6969b092708d57f88b0f0fdbb3464c786f90710c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h 013bd8d50841ea314f5ea2bd507470f2c3aff831 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h d63388ff48ca055c82bcd6148506eacd0e26b4dc - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vgpumgrinternal.h 96f72ec608cd198be995f3acd9c04afe7c7e6dc8 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h 359c6b06f2712a527d1ef08465179c14a8b4a751 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h 4c2af959d06536294d62b2366a6ba61ca744bd50 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h d15e8e86ca66b3a69a774e322dfdd349b9f978b9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spdm.h 898fa08818b657c27b456d952e7a4e09d8d197ee - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h 9933e90ad92eb7df2f64dcc30dcd680d5f7c530d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h 18d1a44b7113c1707bbf5c65fb1be790304c0bed - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h 0cd5e883dfafb74ce2ec9bccca6e688a27e6cfa9 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h 07f82ae90cde3c6e2e6c5af135c40e01660c39a3 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h c8f1c115d78bab309c0a887324b0dabfb8f9ea2d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h ecceb8f7382c8f55c6ccd0330e14ccbc49fcd09c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h 2577a1d505a3d682e223fbcbc6d4c7d13162749d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h d3969094e68f9d584ba9c6fb5457801caff6ccc1 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmu.h 74f1abf45a2a0f60c82e4825b9abfa6c57cab648 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h 115f683e5926ae130de87e4cea805ef6915ed728 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h d4ba227a522423503e5044c774dbcca692c48247 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h 97bb79e74b25134fa02a60d310b3e81170df6fd6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h baeb07c8bdadf835db754452f63d40956bc6a199 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h 338c7de5d574fe91cda1372c5221e754d4c4b717 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h 4e4a4f9e94f2d7748064949f4b16845829670bf6 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h 5ac6c9a299256935259eaf94323ae58995a97ad7 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h e4441458a7914414a2092f36a9f93389ed65154a - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h b55e4cf81b6112868eb6f6cd9c1a3b32f8fcda49 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h 302f79771fcdba3122cf61affb53e0a3a3a27e6d - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h 5c7b955ef5e6f6ca9c0944e8a2b2c4a1ae760e04 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h 93a9fa93eb3d1099991e4682b6228124220ca293 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h 7f1af5b788616bab285a73bab5098fb6d134b159 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h 51dbd71f1cd5a66dd7a5b0fbb753713d27ff937c - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h cf1757ff453132fb64be0dec6c50eb935db29784 - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink_common.h 59254e4bdc475b70cfd0b445ef496f27c20faab0 - 
src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h 119432bbce99e91484a2bac79ca5257a36a7f98b - src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h 7f15697ca8645f77352f88c2a84713f348e98a24 - src/common/unix/nvidia-3d/include/nvidia-3d-vertex-arrays.h 220ac9628fe5afa0191b8c20304402baf0f70353 - src/common/unix/nvidia-3d/include/nvidia-3d-fermi.h 23478354284aa1be69bc70fa4157aa408177829c - src/common/unix/nvidia-3d/include/nvidia-3d-volta.h 75859a11c0fae125a0619c47ead964416ac8d6ed - src/common/unix/nvidia-3d/include/nvidia-3d-pascal.h e621c127011311e8f97c8784d8539751a820bf47 - src/common/unix/nvidia-3d/include/nvidia-3d-maxwell.h 07fc2cd8495309f1218b9ddee4a4809b6dcb65a3 - src/common/unix/nvidia-3d/include/nvidia-3d-types-priv.h 1276b525f23b582e029c2ddc9ed0115f8e9dafb4 - src/common/unix/nvidia-3d/include/nvidia-3d-hopper.h 5030b264e17b70df0c99bc9da4350bdb48f2f60a - src/common/unix/nvidia-3d/include/nvidia-3d-kepler.h 146b4f305bfe710622a878fe3e9afd4f834124b8 - src/common/unix/nvidia-3d/include/nvidia-3d-turing.h 61f0a408812c04a59fb8f12713ce34d2ed544fe3 - src/common/unix/nvidia-3d/include/nvidia-3d-surface.h e7a4acaef431a49ca7efd6bf72b6e8b57fafbab0 - src/common/unix/nvidia-3d/include/nv_xz_mem_hooks.h 40a9c57cca5b2f8acfe3ead472dcf0adc9423050 - src/common/unix/nvidia-3d/src/nvidia-3d-vertex-arrays.c af1a4d99bd19b72de120ba2046f35b95650985b1 - src/common/unix/nvidia-3d/src/nvidia-3d-volta.c f78f737f1dfb52cf248543cced017a8fbad7b270 - src/common/unix/nvidia-3d/src/nvidia-3d-surface.c 4ea7a2a6811239760a1b56833fb07dbf8a99a10e - src/common/unix/nvidia-3d/src/nvidia-3d-hopper.c e43e6ce6b9781d44b68868703fdbb779fc95f5d4 - src/common/unix/nvidia-3d/src/nvidia-3d-kepler.c 09fa5fbae25e08c819277566d7281f17305863f8 - src/common/unix/nvidia-3d/src/nvidia-3d-turing.c e0ef9ab77cfdf207c800a9c067739add28632047 - src/common/unix/nvidia-3d/src/nvidia-3d-pascal.c 57f19f6aa7b896794aafacd978b2469d976f6f78 - src/common/unix/nvidia-3d/src/nvidia-3d-maxwell.c 08c29625af227debb72dd703630a754ac4fbeee0 - src/common/unix/nvidia-3d/src/nvidia-3d-core.c 7ca41841cc54bd597f5c10cc346b8f574b1c2acf - src/common/unix/nvidia-3d/src/nvidia-3d-fermi.c d0331b7ebba0537af50bdf5815d9c048cbeb3388 - src/common/unix/nvidia-3d/src/nvidia-3d-init.c 569a662ce5f79dc450f44eeb7a0ff36580ba27fe - src/common/unix/nvidia-3d/interface/nvidia-3d-types.h a06524af04de90562b08b6b26783232cf7ff01d4 - src/common/unix/nvidia-3d/interface/nvidia-3d-utils.h 3e97ecc773087c0c7f370faf0a9ff838793c9bd6 - src/common/unix/nvidia-3d/interface/nvidia-3d-color-targets.h 2d91e6f3ad425d3ca95de79ecb929b22cac57f52 - src/common/unix/nvidia-3d/interface/nvidia-3d-shaders.h fd454a2318e970e6b1cb4a4b7b5633e4cb2e8b45 - src/common/unix/nvidia-3d/interface/nvidia-3d.h 34daeec12bbf45f0f85406afc56414da45afc2e6 - src/common/unix/nvidia-3d/interface/nvidia-3d-shader-constants.h 727210acfe72963aa6dddf1bcee91dc122897113 - src/common/unix/nvidia-3d/interface/nvidia-3d-constant-buffers.h 069b576dc1f03143999512cd03fc48fe18ed6706 - src/common/unix/nvidia-3d/interface/nvidia-3d-imports.h 2476f128437c0520204e13a4ddd2239ff3f40c21 - src/common/unix/common/inc/nv-float.h 881cbcc7ed39ea9198279136205dbe40142be35e - src/common/unix/common/inc/nv_assert.h cb7c13757ca480e10b4ef3e3851d82ad5ccca3f1 - src/common/unix/common/inc/nv_mode_timings.h d5253e7e4abd3ad8d72375260aa80037adcd8973 - src/common/unix/common/inc/nv_dpy_id.h 3e64a8fe60bb1266a769be8a5c0716e10c816b38 - src/common/unix/common/inc/nv_amodel_enum.h 995d8447f8539bd736cc09d62983ae8ebc7e3436 - src/common/unix/common/inc/nv_common_utils.h 
edded9ca3d455444372fe6c497b2d61bd0cc3f96 - src/common/unix/common/utils/nv_memory_tracker.c 7bccb5a3dea9208f0fbd86d36efc369f215d5c3c - src/common/unix/common/utils/unix_rm_handle.c 26f2a36442266c5d2664d509ecfd31094a83e152 - src/common/unix/common/utils/nv_vasprintf.c e903bbbecf4fb3085aaccca0628f0a0e4aba3e58 - src/common/unix/common/utils/nv_mode_timings_utils.c 667b361db93e35d12d979c47e4d7a68be9aa93b6 - src/common/unix/common/utils/interface/nv_mode_timings_utils.h 07c675d22c4f0f4be6647b65b6487e2d6927c347 - src/common/unix/common/utils/interface/nv_memory_tracker.h 8d9c4d69394b23d689a4aa6727eb3da1d383765a - src/common/unix/common/utils/interface/unix_rm_handle.h 9e008270f277e243f9167ab50401602378a2a6e8 - src/common/unix/common/utils/interface/nv_vasprintf.h 673bbd33569f55a900b5388a77d19edd3822ecf3 - src/common/unix/xzminidec/src/xz_dec_stream.c 9c67bdcbea04fbe1a5b2746549e502cdc368b54e - src/common/unix/xzminidec/src/xz_config.h f2cfbcf1e2cb1d7545b5de609a4e7672bf8ae976 - src/common/unix/xzminidec/src/xz_dec_bcj.c 93af3bcdf863afa9655107c86f49aefdf9c05d90 - src/common/unix/xzminidec/src/xz_lzma2.h fba46fe8f4a160d71a708578a85ab6731e4e024f - src/common/unix/xzminidec/src/xz_crc32.c 0ce26be0fb63a7ae52e2bb15a1770c80b9a5ac84 - src/common/unix/xzminidec/src/xz_stream.h 8365ec8d875fad74507d49228ad8959c66bbc360 - src/common/unix/xzminidec/src/xz_dec_lzma2.c 2ade48b4c53fc3bebf1587bc0a1a08b26cd5981d - src/common/unix/xzminidec/src/xz_private.h c2a87873eeff2a8010bb8a2cb8d1df28a20a0097 - src/common/unix/xzminidec/interface/xz.h 4498dc65d71b2b8635b365550e5e521da14c8e6b - src/common/unix/nvidia-push/include/nvidia-push-priv.h 4847b168b4f5e78dbb92cfec80734789a9131b87 - src/common/unix/nvidia-push/include/nvidia-push-priv-imports.h 616dd99d8dda5dbe35032a5fc558ff48f7cc1620 - src/common/unix/nvidia-push/src/nvidia-push-init.c 0916485ec1ff275771d88a725dcbf586663dbc33 - src/common/unix/nvidia-push/src/nvidia-push.c 548f9e591d2c851b157575e1b83e25eb47bc61e6 - src/common/unix/nvidia-push/interface/nvidia-push-methods.h 5f5013bdbda9582252db2e92a105a57f24ca7d96 - src/common/unix/nvidia-push/interface/nvidia-push-init.h f3576444d1dbcc4e9379bee6151ef8c7a382e276 - src/common/unix/nvidia-push/interface/nvidia-push-utils.h 918c4f2e2edd0a52c7085f758286dacd21b5b4c5 - src/common/unix/nvidia-push/interface/nvidia-push-types.h b54add7dea08ff736ac27ee259f6ccb389c01f09 - src/common/unix/nvidia-headsurface/nvidia-headsurface-types.h 5d014581148b38eede1d31a1f48e388cf6eb7a45 - src/common/unix/nvidia-headsurface/nvidia-headsurface-constants.h e1fbb040ea9d3c773ed07deb9ef5d63c8c8cab7a - src/common/inc/nvSha1.h 8f0d91e1a8f0d3474fb91dc3e6234e55d2c79fcc - src/common/inc/rmosxfac.h bcad75550591ede46152403e40413f87e85b0a80 - src/common/inc/nvlog_defs.h ebccc5c2af2863509e957fe98b01d9a14d8b0367 - src/common/inc/nv_list.h 0e970acfcadddd89fae91c812647fecb80c98d52 - src/common/inc/pex.h 73e2133709eb920a92fcebf7aaab958020493183 - src/common/inc/nvctassert.h 6fa5359ffe91b624548c226b6139f241771a9289 - src/common/inc/jt.h 489ce9f046d9c2ff95a1284ab5e04b5843b874ae - src/common/inc/nvVer.h 7ab322addb3e1ba880cee07dc0d26d882db097b0 - src/common/inc/nvCpuIntrinsics.h d9c0905f374db0b9cc164ce42eab457d1ba28c53 - src/common/inc/nvop.h d70c17a0693c8b5dbf7c83f693eec352ce22917c - src/common/inc/nv_smg.h b4c5d759f035b540648117b1bff6b1701476a398 - src/common/inc/nvCpuUuid.h 4282574b39d1bcaf394b63aca8769bb52462b89b - src/common/inc/nvBinSegment.h 8c41b32c479f0de04df38798c56fd180514736fc - src/common/inc/nvBldVer.h 62e510fa46465f69e9c55fabf1c8124bee3091c4 - 
src/common/inc/nvHdmiFrlCommon.h 82aadec9509f41eab58727c3498dc24a30a0128e - src/common/inc/nvrmcontext.h d74a8d4a9ae3d36e92b39bc7c74b27df44626b1c - src/common/inc/nv_mig_types.h a346380cebac17412b4efc0aef2fad27c33b8fb5 - src/common/inc/nvlog_inc2.h e670ffdd499c13e5025aceae5541426ab2ab0925 - src/common/inc/gps.h 963aebc9ec7bcb9c445eee419f72289b21680cdd - src/common/inc/hdmi_spec.h 987027bed503d8ce5ad01706aae4a16ee37f3e2d - src/common/inc/nvSemaphoreCommon.h 5257e84f2048b01258c78cec70987f158f6b0c44 - src/common/inc/nvlog_inc.h 4a88a536b71995db70e3a83a48d47072693ec69d - src/common/inc/nv_speculation_barrier.h 2408132586b69e580ff909f7f66451aa2882abff - src/common/inc/nvPNPVendorIds.h 4f7ca8fb43d6885cf60869ed241476032f20f5f3 - src/common/inc/nvUnixVersion.h 23edf9cce2608c494dad045b9466b8f3a18bab56 - src/common/inc/displayport/dpcd20.h ecc26f6fae35818791733c1a56ea1b556bba7f4f - src/common/inc/displayport/displayport2x.h aad6f14dacdb166a8d884cae6c5f382d98e5c46c - src/common/inc/displayport/dpcd14.h 27572a26d0a0a32f38606323ea6da65096bac039 - src/common/inc/displayport/displayport.h 8f7c9c19a76eca84fc2556841042c2f1c3d07a1a - src/common/inc/displayport/dpcd.h 4ee8a4d2a0fe12d348ac4c1a1e0a22bd272e146d - src/common/inc/swref/common_def_nvlink.h e182f9538fea08b5d25f3e74083a7a12a7d49809 - src/common/inc/swref/published/nv_ref.h 641e9803749cbeeca1149c43fe2da5e6edf25137 - src/common/inc/swref/published/nv_arch.h 059493ce7d5390b7e859a19d1a24752df8126ace - src/common/inc/swref/published/turing/tu102/kind_macros.h 86a59440492fd6f869aef3509f0e64a492b4550d - src/common/inc/swref/published/turing/tu102/dev_mmu.h 38589617aab40efdd86b401a18d1e28b5d3b9f8e - src/common/inc/swref/published/disp/v05_02/dev_disp.h 1ea0c3d6ea0c79c01accc7b25d15b421ab49a55d - src/common/inc/swref/published/disp/v04_02/dev_disp.h c01e4a95ede641ff5a9e6918b39db4d2099c91cb - src/common/inc/swref/published/disp/v05_01/dev_disp.h 04345c77f8c7a8b4825f0cb7fc96ca7c876af51c - src/common/inc/swref/published/disp/v04_01/dev_disp.h 1604a3fa3e3142118c82a1dc621cdac81806195a - src/common/inc/swref/published/disp/v03_00/dev_disp.h c4f12d6055573a19f9211fdddd3778575e2a17fd - src/common/inc/swref/published/disp/v02_04/dev_disp.h 64c123c90018c5ee122b02b02cbccfcd5ec32cab - src/common/inc/swref/published/t23x/t234/dev_fuse.h b5ce995e9e5afcd73d39642e31998e087ea133e8 - src/common/shared/nvstatus/nvstatus.c 08816a33e698308c76f3a026c29d0dcb41c5ee20 - src/common/shared/inc/compat.h 9231ac111286772170925e8f6cf92bde5914abb8 - src/common/shared/inc/nvdevid.h 750ecc85242882a9e428d5a5cf1a64f418d59c5f - src/common/displayport/inc/dp_object.h a6ff1a7aee138f6771c5b0bbedb593a2641e1114 - src/common/displayport/inc/dp_messages.h 80380945c76c58648756446435d615f74630f2da - src/common/displayport/inc/dp_timeout.h cdb1e7797c250b0a7c0449e2df5ce71e42b83432 - src/common/displayport/inc/dp_merger.h 070b4f6216f19feebb6a67cbb9c3eb22dc60cf74 - src/common/displayport/inc/dp_buffer.h 02b65d96a7a345eaa87042faf6dd94052235009c - src/common/displayport/inc/dp_messageheader.h 78595e6262d5ab0e6232392dc0852feaf83c7585 - src/common/displayport/inc/dp_auxbus.h e27519c72e533a69f7433638a1d292fb9df8772e - src/common/displayport/inc/dp_crc.h b2db6b37515f7c979e18686694546b9fa5145459 - src/common/displayport/inc/dp_hostimp.h 29ee5f4ef6670f06e96c07b36c11e3bad8bee6aa - src/common/displayport/inc/dp_address.h 575f4f97189ad6b4944bdd4127cdbee79d8c688d - src/common/displayport/inc/dp_groupimpl.h cf09c061fa898cd84edd34a9457726abc501b03c - src/common/displayport/inc/dp_configcaps.h 
afa1135330de2ce8f1a6d20e99b54f507b5adbbd - src/common/displayport/inc/dp_evoadapter.h 01f1dd58ed5bb12503fa45be7a6657cde0a857e2 - src/common/displayport/inc/dp_guid.h cca426d571c6b01f7953180e2e550e55c629f0f4 - src/common/displayport/inc/dp_auxretry.h a086546bf92d7e5e9adf66dcac012b3dc81c2597 - src/common/displayport/inc/dp_internal.h f6e1b0850f5ed0f23f263d4104523d9290bb8669 - src/common/displayport/inc/dp_vrr.h 2f134665b274bb223c3f74e0ec5c6a0392fa6387 - src/common/displayport/inc/dp_discovery.h 07d22f84e6a386dad251761278a828dab64b6dd5 - src/common/displayport/inc/dp_bitstream.h f09aae8321de23e0a48072d0e082aecb84a3ebbe - src/common/displayport/inc/dp_mainlink.h cae50568f7bef4a2a69c4d718a5297b9ae15da3f - src/common/displayport/inc/dp_deviceimpl.h eb9cdbb0a907926b1afd2a551ec19830f06ae205 - src/common/displayport/inc/dp_splitter.h 5bd3706ceea585df76a75dda7f9581b91ee8f998 - src/common/displayport/inc/dp_tracing.h 4a098c4d09dedc33b86748d5fe9a30d097675e9f - src/common/displayport/inc/dp_list.h 6c87ce702f215b21c1ab0064a2a85b3eda96ecec - src/common/displayport/inc/dp_edid.h be558902391fb6cb5085652b560391b54befca4b - src/common/displayport/inc/dp_printf.h 379d3933c90eaf9c35a0bad2bd6af960a321465f - src/common/displayport/inc/dp_wardatabase.h 2016714a04d46ac8412ef55d2156d86ba4d594eb - src/common/displayport/inc/dp_auxdefs.h e2075486b392d6b231f2f133922ac096ca4bc095 - src/common/displayport/inc/dp_ringbuffer.h 09c80a469f1e7e0edd6381578d66fd0e789bc0db - src/common/displayport/inc/dp_regkeydatabase.h 7622cb576c2ebbfe65c0f6132d8561ab1815f668 - src/common/displayport/inc/dp_qse.h dd420c9e7c271d8bea047d431667524105473e95 - src/common/displayport/inc/dp_linkconfig.h e02e5621eaea52a2266a86dcd587f4714680caf4 - src/common/displayport/inc/dp_linkedlist.h 430f42522a1e60f2420aa2e4e471aa20945d0253 - src/common/displayport/inc/dp_timer.h 0f71b80d0a0d53fc6581ef341a4e637a467a3795 - src/common/displayport/inc/dp_connectorimpl.h c8c55dfc7b085b421b01bd9dc7b74abe6f9a0932 - src/common/displayport/inc/dp_connector.h 78ef30b2caf2cf4ff441b5613a796b93ae8973bd - src/common/displayport/inc/dp_messagecodings.h 1363fca23628f312c4b6b0c868b8a43f4a8a5a24 - src/common/displayport/inc/dp_watermark.h d2b00a849a81f6c6092e3b2c4e7ed20fcee62b39 - src/common/displayport/inc/dptestutil/dp_testmessage.h 70b155b0da07a92ede884a9cec715f67e6b5c3e8 - src/common/displayport/src/dp_list.cpp 107b170d4496a754f22819e66794bcdc51256b7c - src/common/displayport/src/dp_sst_edid.cpp fea946e5320e7de8e9229bca8d4a6a14b9e8db59 - src/common/displayport/src/dp_crc.cpp 2caf1cd4a99e55126883dbdd9f6b74883c71e171 - src/common/displayport/src/dp_messagecodings.cpp ef3fefa8dd819d4086c054919b769ca18d058469 - src/common/displayport/src/dp_wardatabase.cpp c49e37f3e225e60a74c71a2b571e542e12fd9bc9 - src/common/displayport/src/dp_watermark.cpp e874ffeaeb6deec57605bf91eaa2af116a9762bd - src/common/displayport/src/dp_bitstream.cpp d699ce22e5e2d641caa2fbacca3095d7dd7b3ffe - src/common/displayport/src/dp_evoadapter.cpp 5f2fb1683cff15175e3ef2276b721863886adc79 - src/common/displayport/src/dp_vrr.cpp 0717b87aafecbe2216e0f0b53ee088a980ef7ad4 - src/common/displayport/src/dp_auxretry.cpp 0670fb5302b1bd3fc65daa848f23e4086619b5e6 - src/common/displayport/src/dp_discovery.cpp 5c12759c27407e8df4c8f1f7bc6ec1595b6b1a63 - src/common/displayport/src/dp_messages.cpp 93ba2409667997fdbcb7af1a8f24ec4a0e15b62c - src/common/displayport/src/dp_timer.cpp ffdd039884b1400eaf4d6d7cc81d0faba5282014 - src/common/displayport/src/dp_deviceimpl.cpp c625716e5516a290ac501563e2a73eef9b4f7dd6 - 
src/common/displayport/src/dp_edid.cpp af1672e8abb92d8d574d9605285753a8580c5d10 - src/common/displayport/src/dp_groupimpl.cpp 2cda981a5e36285ba4173573d074f8761e74f186 - src/common/displayport/src/dp_qse.cpp 5c7adbdfe295f7e1a1d4899a62bf95b456f84412 - src/common/displayport/src/dp_messageheader.cpp d3c4c54f96cc02d37fab45521685426e5c38fb4d - src/common/displayport/src/dp_mst_edid.cpp f56f92e32710b0342805b785d34ba1a9f2a54ed3 - src/common/displayport/src/dp_guid.cpp eb7e47407bd04e871f891038cc08736d066ffaa9 - src/common/displayport/src/dp_connectorimpl.cpp a62b774b7c45882b5854b91b600987c343c24966 - src/common/displayport/src/dp_linkconfig.cpp 0a8818da34b5321763c1f60cb8b6ea5e1a2837f1 - src/common/displayport/src/dp_splitter.cpp 24c0787ce5ec691c6b8edb351000265f47e0156a - src/common/displayport/src/dp_buffer.cpp 422a5d3426d5e1cc2346d9d5f86ccde66062ffdc - src/common/displayport/src/dp_merger.cpp 41589d1d5bfa4316d5d066a7201226baed5332db - src/common/displayport/src/dp_configcaps.cpp a0b68fce10eb0b95518cfd291e2d282872225295 - src/common/displayport/src/dptestutil/dp_testmessage.cpp f0a73cd173382d8abd4b0c70da8b32e144740bb5 - src/common/modeset/timing/nvt_dmt.c 15d7c508b621c877887962b2c27cdb6c7d1144a0 - src/common/modeset/timing/nvt_util.c 1341b987df8336c882e31d22d2141cadfb67272d - src/common/modeset/timing/nvtiming.h f8faf3eabd24a1239e1d4faebdc40c0ffa713ff9 - src/common/modeset/timing/nvt_edid.c c95a1c7914b0d1cba366f2a29e08eb93e0ad033d - src/common/modeset/timing/nvt_edidext_displayid.c 3d3a0889baed7a15c2adce54ba56c1dc783faffd - src/common/modeset/timing/dpsdp.h ff92b05f8648cb4bc31c0f64707065bb56ff3eb3 - src/common/modeset/timing/nvt_dsc_pps.c f75b1d98895bdccda0db2d8dd8feba53b88180c5 - src/common/modeset/timing/displayid.h 1997adbf2f6f5be7eb6c7a88e6660391a85d891b - src/common/modeset/timing/nvt_gtf.c 2737ed1d1eccd163f9cd12b1944f96a03c526b31 - src/common/modeset/timing/nvtiming_pvt.h 58b68f1272b069bb7819cbe86fd9e19d8acd0571 - src/common/modeset/timing/edid.h 6d221aad371436ba304448ba2cf04f89148a09bb - src/common/modeset/timing/nvt_edidext_displayid20.c 48761f63bc2794dfbde10492cc53137458cfcd0e - src/common/modeset/timing/nvt_dsc_pps.h 08ef97092899a3dc80251f61cedc73a851d70baa - src/common/modeset/timing/nvt_edidext_861.c d7cb716eeae50ecfe44fb3c4c4476de598ab78d7 - src/common/modeset/timing/nvt_tv.c 080c1de64d099ecb1aeb9b0b2f176f7be2d609b5 - src/common/modeset/timing/displayid20.h 1c2e163802849848e9ae1586d38c4cd82494217f - src/common/modeset/timing/nvt_ovt.c 54aa88075d9ceb9c6ef99d9c15cb32751a33f8d0 - src/common/modeset/timing/nvt_cvt.c e13cbe77f864afcddaccff7aeb1923cd02f1482f - src/common/modeset/timing/nvt_displayid20.c f8911888bdd441666c03fe27381d7730b7dd9131 - src/common/modeset/hdmipacket/nvhdmipkt_internal.h 12118b508a757fd0a162d1e740d93685a67363ea - src/common/modeset/hdmipacket/nvhdmipkt.c 5b541b9ab6fe9333815a760d4043fef725b1c848 - src/common/modeset/hdmipacket/nvhdmipkt_C971.c 83d94f0a5eb7318d00d96115b0139f9f99052ddc - src/common/modeset/hdmipacket/nvhdmipkt_CC71.c b390bf4f74d690068ff24dce90b79b227769ac2f - src/common/modeset/hdmipacket/nvhdmipkt_C671.c 206727972ab3a5f8a2cde0e153d63aef929b6c01 - src/common/modeset/hdmipacket/nvhdmipkt_0073.c a71968671ce6b64e235de6902bebc2a06da7ae04 - src/common/modeset/hdmipacket/nvhdmipkt_9171.c 54a1b5e5aaf0848a72befc896ed12f1de433ad4f - src/common/modeset/hdmipacket/nvhdmipkt_9471.c 57dbf547549c6fe24eb51cc54185b321c263108f - src/common/modeset/hdmipacket/nvhdmipkt.h 9be7b7be94a35d1d9a04f269ff560dbbb7860a2a - src/common/modeset/hdmipacket/nvhdmipkt_9571.c 
559406ebdbd7f810f1ecbeb3e78b6518834b90fe - src/common/modeset/hdmipacket/nvhdmipkt_class.h e1df3885cd76f5159801c1f66f20b18537eaecf3 - src/common/modeset/hdmipacket/nvhdmipkt_C871.c 5e12a290fc91202e4ba9e823b6d8457594ed72d3 - src/common/modeset/hdmipacket/nvhdmi_frlInterface.h 67db549636b67a32d646fb7fc6c8db2f13689ecc - src/common/modeset/hdmipacket/nvhdmipkt_9271.c e6d500269128cbd93790fe68fbcad5ba45c2ba7d - src/common/modeset/hdmipacket/nvhdmipkt_C371.c 764d216e9941d0dcf41e89b2a0ddd8acf55902c8 - src/common/modeset/hdmipacket/nvhdmipkt_common.h b882497ae393bf66a728dae395b64ac53602a1a5 - src/common/softfloat/nvidia/nv-softfloat.h be9407a273620c0ba619b53ed72d59d52620c3e4 - src/common/softfloat/nvidia/platform.h f6d98979ab2d1e2b0d664333104130af6abbcad5 - src/common/softfloat/source/f64_to_i64_r_minMag.c 21a6232d93734b01692689258a3fdfbbf4ff089d - src/common/softfloat/source/s_roundToUI32.c 29321080baa7eab86947ac825561fdcff54a0e43 - src/common/softfloat/source/i32_to_f32.c dafa667ee5dd52c97fc0c3b7144f6b619406c225 - src/common/softfloat/source/s_mulAddF64.c 108eec2abf1cddb397ce9f652465c2e52f7c143b - src/common/softfloat/source/f64_roundToInt.c 513a7d1c3053fc119efcd8ae1bcc9652edc45315 - src/common/softfloat/source/f32_lt.c d19ff7dfece53875f2d6c6f7dd9e7772f7b0b7ec - src/common/softfloat/source/f32_to_i64_r_minMag.c 2db07bbb8242bc55a24ef483af6d648db0660de0 - src/common/softfloat/source/f32_add.c c951c9dffa123e4f77ed235eca49ef9b67f9f3d2 - src/common/softfloat/source/s_subMagsF64.c 5c1026617c588bcf5f1e59230bd5bb900600b9ac - src/common/softfloat/source/f64_mul.c 5c4ee32cc78efc718aaa60ec31d0b00b1bee3c2c - src/common/softfloat/source/f64_to_ui64_r_minMag.c 6fa7493285fe2f7fdc0ac056a6367e90327905c2 - src/common/softfloat/source/f32_sub.c da3b3f94a817909a3dc93ca5fa7675805c7979e0 - src/common/softfloat/source/f64_isSignalingNaN.c d701741d8d6a92bb890e53deda1b795f5787f465 - src/common/softfloat/source/f64_le.c baa7af4eea226140c26ffe6ab02a863d07f729fb - src/common/softfloat/source/f64_eq_signaling.c 2e5c29d842a8ebc5fbf987068dc9394cee609cc7 - src/common/softfloat/source/f32_to_ui64.c 054b23a974fc8d0bab232be433c4e516e6c1250a - src/common/softfloat/source/f64_lt_quiet.c dde685423af544e5359efdb51b4bf9457c67fa3b - src/common/softfloat/source/f32_sqrt.c fb062ecbe62a1f5878fd47f0c61490f2bde279dd - src/common/softfloat/source/s_roundToI32.c 8e58f0258218475616ff4e6317516d40ad475626 - src/common/softfloat/source/f32_lt_quiet.c ab19c6b50c40b8089cb915226d4553d1aa902b0e - src/common/softfloat/source/f64_to_i32_r_minMag.c 86fdc2472526375539216461732d1db6a9f85b55 - src/common/softfloat/source/s_roundPackToF32.c 9266c83f3e50093cc45d7be6ab993a0e72af1685 - src/common/softfloat/source/s_roundPackToF64.c 2e0fec421f4defd293cf55c5f3af7d91f4b7d2cc - src/common/softfloat/source/ui64_to_f32.c 68843a93e1f46195243ef1164f611b759cf19d17 - src/common/softfloat/source/f32_le_quiet.c 00ab2120f71117161d4f6daaa9b90a3036a99841 - src/common/softfloat/source/f32_to_ui32.c d0f8f08c225b60d88b6358d344404ba9df3038ec - src/common/softfloat/source/s_normSubnormalF32Sig.c 0108fe6f0d394ad72083aff9bb58507f97a0b669 - src/common/softfloat/source/ui32_to_f64.c 7bc81f5bc894118c08bfd52b59e010bc068ed762 - src/common/softfloat/source/ui32_to_f32.c 0adfa7e174cdb488bb22b06642e14e7fc6f49c67 - src/common/softfloat/source/s_roundToI64.c c3ce12c227d25bc0de48fbcf914fc208e2448741 - src/common/softfloat/source/f64_sub.c b9fd15957f7ae5effeccb5d8adaa7434b43f44e1 - src/common/softfloat/source/s_roundToUI64.c 29396b7c23941024a59d5ea06698d2fbc7e1a6ca - 
src/common/softfloat/source/f64_to_i64.c ae25eea499b3ea5bdd96c905fd0542da11083048 - src/common/softfloat/source/s_normRoundPackToF64.c b22876b0695f58ee56143c9f461f1dde32fefbf3 - src/common/softfloat/source/f64_to_ui64.c b8c5ccc1e511637d8b2ba2657de4937b80c01c07 - src/common/softfloat/source/f32_le.c 0126e0fceb1fa7912f4d5b8c3a6ebb4a048eb98a - src/common/softfloat/source/f16_to_f32.c 1ff879eca2a273293b5cd6048419b2d2d8063b93 - src/common/softfloat/source/f64_mulAdd.c 0e9694d551848d88531f5461a9b3b91611652e9a - src/common/softfloat/source/f64_to_ui32_r_minMag.c 5a5e0d9f1ee7e8c0d1d4f9fbcf6eba330a5f1792 - src/common/softfloat/source/f32_isSignalingNaN.c bc992c88f3de09e3a82447cf06dbde7c6604f7f8 - src/common/softfloat/source/f64_to_f32.c 1a86a6948bf6768bd23a19f1f05d40968c1d2b15 - src/common/softfloat/source/f64_rem.c 50daf9186bc5d0180d1453c957164b136d5ffc89 - src/common/softfloat/source/f64_eq.c 09cb0cdb90eb23b53cd9c1a76ba26021084710d1 - src/common/softfloat/source/s_addMagsF32.c 9f4d355d85fbe998e243fe4c7bbf8ad23062b6e2 - src/common/softfloat/source/i64_to_f64.c fd40a71c7ebf9d632a384fadf9487cfef4f3ea98 - src/common/softfloat/source/s_shiftRightJam128.c aaf6ccb77a1a89fa055a0fb63513297b35e2e54b - src/common/softfloat/source/f64_le_quiet.c 38bd00e9c4d2f1354c611404cca6209a6c417669 - src/common/softfloat/source/s_countLeadingZeros64.c d9a86343e6cc75714f65f690082dd4b0ba724be9 - src/common/softfloat/source/s_roundPackToF16.c 0bf499c0e3a54186fa32b38b310cc9d98ccdcfe3 - src/common/softfloat/source/f32_eq.c d4b26dc407a891e9ff5324853f1845a99c5d5cd2 - src/common/softfloat/source/f32_to_i32.c 296c40b0589536cb9af3231ad3dcd7f2baaa6887 - src/common/softfloat/source/f64_lt.c 0d8e42636a3409a647291fdb388001c2b11bba07 - src/common/softfloat/source/f32_to_f16.c 9a60700ce25578100d83d529e49f08f71cf35e17 - src/common/softfloat/source/s_normSubnormalF16Sig.c ec1a797b11f6e846928a4a49a8756f288bda1dfa - src/common/softfloat/source/i32_to_f64.c 729e790328168c64d65a1355e990274c249bbb3a - src/common/softfloat/source/f32_to_i32_r_minMag.c 9a5b93459ace2da23964da98617d6b18006fab86 - src/common/softfloat/source/s_countLeadingZeros8.c 84b0a01ba2a667eb28b166d45bd91352ead83e69 - src/common/softfloat/source/i64_to_f32.c 4b37be398b3e73ae59245f03b2ba2394fc902b4d - src/common/softfloat/source/s_normSubnormalF64Sig.c 6f83fa864007e8227ae09bb36a7fdc18832d4445 - src/common/softfloat/source/f32_mul.c daeb408588738b3eb4c8b092d7f92ac597cf1fc6 - src/common/softfloat/source/f32_rem.c a94c8c2bd74633027e52e96f41d24714d8081eb4 - src/common/softfloat/source/s_approxRecipSqrt_1Ks.c 69dc4cc63b2a9873a6eb636ee7cb704cbd502001 - src/common/softfloat/source/f64_to_ui32.c 50b3147f8413f0595a4c3d6e6eeab84c1ffecada - src/common/softfloat/source/s_normRoundPackToF32.c bbc70102b30f152a560eb98e7a1a4b11b9ede85e - src/common/softfloat/source/f64_sqrt.c 760fd7c257a1f915b61a1089b2acb143c18a082e - src/common/softfloat/source/s_addMagsF64.c ebb4f674b6213fec29761fc4e05c1e3ddeda6d17 - src/common/softfloat/source/f32_mulAdd.c 4445b1fbbd507144f038fd939311ff95bc2cf5f1 - src/common/softfloat/source/ui64_to_f64.c 871cb1a4037d7b4e73cb20ad18390736eea7ae36 - src/common/softfloat/source/f32_to_ui64_r_minMag.c ce37cdce572a3b02d42120e81c4969b39d1a67b6 - src/common/softfloat/source/f64_to_i32.c c29536f617d71fe30accac44b2f1df61c98a97dc - src/common/softfloat/source/f64_div.c 54cbeb5872a86e822bda852ec15d3dcdad4511ce - src/common/softfloat/source/f64_add.c e7890082ce426d88b4ec93893da32e306478c0d1 - src/common/softfloat/source/s_approxRecipSqrt32_1.c 824383b03952c611154bea0a862da2b9e2a43827 
- src/common/softfloat/source/s_subMagsF32.c 00c612847b3bd227a006a4a2697df85866b80315 - src/common/softfloat/source/s_mulAddF32.c 7c8e5ab3f9bf6b2764ce5fffe80b2674be566a12 - src/common/softfloat/source/softfloat_state.c e4930e155580a0f5aa7f3694a6205bc9aebfe7aa - src/common/softfloat/source/f32_to_f64.c 1484fc96d7731695bda674e99947280a86990997 - src/common/softfloat/source/f32_to_i64.c 2960704c290f29aae36b8fe006884d5c4abcabb4 - src/common/softfloat/source/f32_div.c 23b76c1d0be64e27a6f7e2ea7b8919f1a45a8e7c - src/common/softfloat/source/f32_to_ui32_r_minMag.c fe06512577e642b09196d46430d038d027491e9f - src/common/softfloat/source/f32_eq_signaling.c 5e6f9e120a17cc73297a35e4d57e4b9cbce01780 - src/common/softfloat/source/s_mul64To128.c e0ad81cfb5d2c0e74dc4ece9518ca15ffc77beaf - src/common/softfloat/source/f32_roundToInt.c d8b0c55a49c4fa0b040541db6d5ff634d7d103e7 - src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c a6d5c83f6a0542b33ac9c23ac65ef69002cfff9d - src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c 8efb3f7cd3217b5cd25896b4bad058c72fe5b89a - src/common/softfloat/source/8086-SSE/specialize.h 3d0dbc0a672d039a6346e1c21ddf87ffc9181978 - src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c d152bc457b655725185bdff42b36bb96d6e6715e - src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c 1dd1b424087d9c872684df0c1b4063b077992d5f - src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c 252c816378fddab616b1f2a61e9fedd549224483 - src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c 21a11759ed2afd746a47c4d78b67640c2d052165 - src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c 98a850359fe08a7e39212f89ce96014ba80910da - src/common/softfloat/source/8086-SSE/s_f16UIToCommonNaN.c 0cbae7a5abc336331d460cbd3640d2cda02af434 - src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c 4cd1d6cfca3936a39aab9bc0eb622f5c7c848be1 - src/common/softfloat/source/include/softfloat_types.h 1ded4df85ff5fa904fa54c27d681265425be1658 - src/common/softfloat/source/include/primitiveTypes.h 5f589a4d48cc59a0e5762303df9ea4a06ca398da - src/common/softfloat/source/include/softfloat.h 9d8a025889f3ec0e1cca7c4b52308158e1f39226 - src/common/softfloat/source/include/primitives.h f118cad66d3c8ee17a52cec97cd3dc7e7a1cf2bc - src/common/softfloat/source/include/internals.h 14045fa6330dc6ed20d35eac5b4c5909631bca90 - src/common/src/nv_smg.c abccf0a8732b881d904d937287ced46edcde45ac - src/nvidia/Makefile c5f16fdf43ca3d2845d120c219d1da11257072b0 - src/nvidia/nv-kernel.ld dcf4427b83cce7737f2b784d410291bf7a9612dc - src/nvidia/arch/nvalloc/unix/include/nv-reg.h 4750735d6f3b334499c81d499a06a654a052713d - src/nvidia/arch/nvalloc/unix/include/nv-caps.h 3c61881e9730a8a1686e422358cdfff59616b670 - src/nvidia/arch/nvalloc/unix/include/nv_escape.h 7fc52a43b242a8a921c2707589fa07c8c44da11c - src/nvidia/arch/nvalloc/unix/include/nv.h 81592e5c17bebad04cd11d73672c859baa070329 - src/nvidia/arch/nvalloc/unix/include/nv-chardev-numbers.h e69045379ed58dc0110d16d17eb39a6f600f0d1d - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-lockless-diag.h d1b1a1bc1fa30c1a966e95447f7831a06340d2d0 - src/nvidia/arch/nvalloc/unix/include/nv-priv.h 7e0175a8006f06b1d5f5be078d851a4f01648b96 - src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h 2eb11e523a3ecba2dcd68f3146e1e666a44256ae - src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h 5f004c33f130e6c5cd275f9c85d46185e4e9b757 - src/nvidia/arch/nvalloc/unix/include/os_custom.h 499e72dad20bcc283ee307471f8539b315211da4 - src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h 
824ffbe85c591c7423855bee7bf3193473ef2b70 - src/nvidia/arch/nvalloc/unix/include/osapi.h 669bd0c054b00a74e8996c18063fa9bbf5cd7690 - src/nvidia/arch/nvalloc/unix/include/os-interface.h 2ffd0138e1b3425ade16b962c3ff02a82cde2e64 - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numa.h b3ecb82f142a50bdc37eafaeb86d67f10fbcf73f - src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h af45762b6eeae912cc2602acf7dc31d30775ade7 - src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h 107d1ecb8a128044260915ea259b1e64de3defea - src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h 3a26838c4edd3525daa68ac6fc7b06842dc6fc07 - src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h 98a5a3bd7b94e69f4e7d2c3a1769583c17ef5b57 - src/nvidia/arch/nvalloc/unix/src/os.c a659a503a6fcffdcacd2b76ae6b1f156b4b9216c - src/nvidia/arch/nvalloc/unix/src/osmemdesc.c b5ae9b8d551a3e5489605c13686fb6cce4579598 - src/nvidia/arch/nvalloc/unix/src/power-management-tegra.c a17aae37486b325442e447489b64add3694ab8b0 - src/nvidia/arch/nvalloc/unix/src/osunix.c b5b409625fde1b640e4e93276e35248f0fccfa4c - src/nvidia/arch/nvalloc/unix/src/gcc_helper.c 07f9c0995f1fbbba9eb819321996b57c1d2b86cd - src/nvidia/arch/nvalloc/unix/src/exports-stubs.c d8815125dbf79831b8fe55367bba60e7115243cc - src/nvidia/arch/nvalloc/unix/src/osinit.c ef270b45ff3d72db9b319408c8bb060303e589f5 - src/nvidia/arch/nvalloc/unix/src/osapi.c a7383deea9dcab093323d8dde1ede73f85f93343 - src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c b1a6d0a1ca4307b8e8d9cf136c94ef7c9efbae4c - src/nvidia/arch/nvalloc/unix/src/registry.c 915ee6dbffff92a86d68ac38549b25aa1e146872 - src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c ffea38efca6a43af9bc61bb6cb8c2b14c3d6fc20 - src/nvidia/arch/nvalloc/unix/src/escape.c d1089d8ee0ffcdbf73a42d7c4edb90769aa79d8c - src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h 8530e3d1db60647a9132e10c2119a75295f18060 - src/nvidia/arch/nvalloc/common/inc/nv-firmware.h 1cd024cc06bba6f7c3663ca2d03fe25bd77761d3 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.c 0be1c1ff5f200a9aa68cdf3d03bc4780e757a1ea - src/nvidia/generated/g_traceable_nvoc.h 998d18bc2f6e2cdd00cf383000b66be8e8778baa - src/nvidia/generated/g_nv_debug_dump_nvoc.h 4491368ac52cfda834bdd24df3b6f156c32ec3a9 - src/nvidia/generated/g_client_nvoc.c 4eb2331b2f9f8d8c01d62ad771702e9b42f22b65 - src/nvidia/generated/g_lock_stress_nvoc.h 6b5bf7b2f5dd000bfa2949e14642dd582ba4a378 - src/nvidia/generated/g_event_buffer_nvoc.h cd5f4b0bc23710e5b6277ff214a62c4993e95581 - src/nvidia/generated/g_code_coverage_mgr_nvoc.c b9903d23010ea9d63117c27d5fe0cfba09849fa4 - src/nvidia/generated/g_context_dma_nvoc.c 4b7aaad308f2f25b07d932fc0fe0c3327db522a9 - src/nvidia/generated/g_objtmr_nvoc.h 7bd355d08dc6f2509db22ed56f1c05ab97f5f620 - src/nvidia/generated/g_allclasses.h 4eea9bd7952613f08af07508e2e9c1c0344940e7 - src/nvidia/generated/g_gpu_mgr_nvoc.h c5cad88aa7de5a04a3b6f9836f355347448d6a7b - src/nvidia/generated/g_rmconfig_util.h db1d1e047d00780efbe4c1c1ae6e4fecd3ab49e8 - src/nvidia/generated/g_os_desc_mem_nvoc.h 1ec59322d0874153252a387dcb50bf6d7328d56e - src/nvidia/generated/g_system_mem_nvoc.c 21e57b9c63e847eeb5a29c218db2c5c37db83298 - src/nvidia/generated/g_gpu_nvoc.c 4613f3d42dbc899b278fca71c3aaae79159d7dbe - src/nvidia/generated/g_gpu_user_shared_data_nvoc.c b55573cb02ff8129aa4f5aa050ac53d1f4fcfdb2 - src/nvidia/generated/g_rs_resource_nvoc.h 16c8d551a3a908ec194d39c88c5603cea436c9b7 - src/nvidia/generated/g_binary_api_nvoc.c a232e1da560db2322a921a9f0dc260ad703af2b4 - src/nvidia/generated/g_mem_nvoc.h 
c503ca5954b8f6ebdba96904a1616a55ce08a2d3 - src/nvidia/generated/g_device_nvoc.c e7cc58e9f8173583bd253fa73df56324e48aa5ad - src/nvidia/generated/g_io_vaspace_nvoc.h b93ab0b9e39ca3c5b397cbdba58e4d9894d4130f - src/nvidia/generated/g_rpc-structures.h afda2b8579ed309e23be0ad1a835ee84fcbe535f - src/nvidia/generated/g_client_nvoc.h e97edab623386f7d1534b4f053a66fc8659167f6 - src/nvidia/generated/g_event_nvoc.h f4b2bffbdbb2b0b398e8dfe3420e46b2bf27839c - src/nvidia/generated/g_hal_nvoc.h 4626f4a1a4eadc3695d79454db25bd0153d1165d - src/nvidia/generated/g_resource_fwd_decls_nvoc.h 30035e0fb1ae8b816fc42b78a17eb30462640ce4 - src/nvidia/generated/g_kernel_head_nvoc.h 52ae6273ddf101e9715aed99991506cad8e96859 - src/nvidia/generated/g_disp_inst_mem_nvoc.c abc769851bd523ee08cf829bf3864cf5475066ec - src/nvidia/generated/g_subdevice_nvoc.h 255c404719b18c2a3aec2a47948c0fbcf4affd4b - src/nvidia/generated/rmconfig.h c7fda8cbe109ad2736694ce9ec0e2ab93d0e3f2c - src/nvidia/generated/g_mem_list_nvoc.h f9bdef39159a8475626a0edcbc3a53505a0ff80a - src/nvidia/generated/g_os_hal.h dc7bbba203ee5ff91b6f14eb3abfad8c15854e1d - src/nvidia/generated/g_mem_desc_nvoc.h 1702c9d021149c0f5c73ebeda7bea29e246af31d - src/nvidia/generated/g_nv_name_released.h 2e0c45e4186d44774286a71daf797c980c2ddf7a - src/nvidia/generated/g_objtmr_nvoc.c 9b78bc02a8fe0ec297167bb4bdb7f8255b94198b - src/nvidia/generated/g_disp_capabilities_nvoc.h 967d8c0d7d5c1271e82f30af992f48322695d367 - src/nvidia/generated/g_eng_state_nvoc.h 831cdf0767703c00918e70ef3933716b201781f1 - src/nvidia/generated/g_syncpoint_mem_nvoc.c ce74dbd8f88f50af0b3ea3b3034395cd98eb08e8 - src/nvidia/generated/g_gpu_access_nvoc.c 08ad957117efefe2e04448bce1cad2dec0e984af - src/nvidia/generated/g_odb.h 033a6d6bac0829783afe8a582fa6c4f329be7f04 - src/nvidia/generated/g_hypervisor_nvoc.h c1471919f6c19e1b576b7c636ba5ae7ab9d58177 - src/nvidia/generated/g_gpu_db_nvoc.c f68b7e209e268d14b0b98686d1766683139b9b5f - src/nvidia/generated/g_system_nvoc.c cdcab5a0094b9e9664f7a0e62ec31783617de5ab - src/nvidia/generated/g_code_coverage_mgr_nvoc.h 5e614b6db957a0ae77502ca6d5966bca506f8020 - src/nvidia/generated/g_gpu_group_nvoc.h eb15207a28b8eed41182de6311ec48f5e321729f - src/nvidia/generated/g_gpu_user_shared_data_nvoc.h ef9def144aaf1b2b292c9815c68a6007eff56dda - src/nvidia/generated/g_rs_server_nvoc.c eb07ee114f8cfc039978cdb7501c3ea03c879864 - src/nvidia/generated/g_generic_engine_nvoc.c d2f3d17e05337992bc031c823186583d62c10235 - src/nvidia/generated/g_chips2halspec_nvoc.h ad94c2430328b91392db363158fa2279b794cc54 - src/nvidia/generated/g_gpu_resource_nvoc.h c77048521f9c9890f14108c2c5457d78a85fe69d - src/nvidia/generated/g_gpu_access_nvoc.h 38a98487eec65d8807e47f99b013619c1537e983 - src/nvidia/generated/g_dce_client_nvoc.c d09bde39b1f12490ea0a696d6915d521c9f13953 - src/nvidia/generated/g_rpc-message-header.h 9becba61ba5ff7580b353abfb87cbe0f37817195 - src/nvidia/generated/g_binary_api_nvoc.h 50f70075eac2515b189e2d07a06b13cfa826945f - src/nvidia/generated/g_rs_client_nvoc.h f8b984c6bc09554753cfe6692dde2eb3171abc57 - src/nvidia/generated/g_disp_channel_nvoc.h 4931b316fc042705a5f094c8c23b0038f980b404 - src/nvidia/generated/g_generic_engine_nvoc.h 2a28557874bd51f567ef42c75fd4e3b09d8ad44d - src/nvidia/generated/g_gpu_arch_nvoc.c a17058fe665949f1e3861fe092e29b229cefbe62 - src/nvidia/generated/g_mem_mgr_nvoc.h 7aa02b964507a8269d35dc56170955025b98bd1a - src/nvidia/generated/g_gpu_arch_nvoc.h 0b9296f7797325b80ff0900f19a3763b564eb26b - src/nvidia/generated/g_context_dma_nvoc.h 4210ff36876e84e0adf1e9d4afb6654c7e6e5060 - 
src/nvidia/generated/g_resserv_nvoc.h 3613b4ec9b285a4e29edefa833704789c887c189 - src/nvidia/generated/g_tmr_nvoc.c 517b6b986a3749c9a6dd0f22bbef6569cdb48d97 - src/nvidia/generated/g_rs_client_nvoc.c 7670f19682bcd6224c999a8f80e770368e735632 - src/nvidia/generated/g_lock_stress_nvoc.c b348b1b465cb359ca3cf10f5e121714ffb95b582 - src/nvidia/generated/g_standard_mem_nvoc.c 54fa23e7cf0f07d625c25d5c08dad9cd1714f851 - src/nvidia/generated/g_standard_mem_nvoc.h 7e528d775caa7ff2bf4159c94fc2c2e4d3aadffc - src/nvidia/generated/g_chips2halspec_nvoc.c 40aa2c65168c893c725c983b2219ceff03d05608 - src/nvidia/generated/g_gpu_halspec_nvoc.h 17c4ce5e67bf8bc8f48a4e2b1b7752d4597703ad - src/nvidia/generated/g_kernel_head_nvoc.c 3ad8329c7f7d63633b7abf2cdd502e4257fa1726 - src/nvidia/generated/g_event_nvoc.c 7aba35752cd4c6447f844cd9432d7dc1bc77b33d - src/nvidia/generated/g_disp_capabilities_nvoc.c fa3a5418a5d6bd7fb2b375ed7f7b64293fdf5f86 - src/nvidia/generated/g_ioaccess_nvoc.h 3c3961ddf6422294c3322e3b0a3c97ee94bfd010 - src/nvidia/generated/g_gpu_mgr_nvoc.c b73b22368abf741cc0a5108b6c9585a81de28b57 - src/nvidia/generated/g_hal.h 6e219df1367ce7dc8f5f4a1f2209a7808a927871 - src/nvidia/generated/g_hal_mgr_nvoc.c 279538daf54163a7a53aab1330fba2c00fc3f234 - src/nvidia/generated/g_rmconfig_util.c 49e84272bbce137683232275b4f13a19c644c650 - src/nvidia/generated/g_prereq_tracker_nvoc.h 57eb0772bc280690eade3f5d54f786e252c75099 - src/nvidia/generated/g_object_nvoc.c 113297c44e702cd6535e007c1c5b2dd5e6f809dc - src/nvidia/generated/g_ioaccess_nvoc.c 216040d1883e8c4f1e8b47d9f6b279ec111d094d - src/nvidia/generated/g_hal_mgr_nvoc.h 113b10cf6cef2608ff4a288e2944d56da64f355d - src/nvidia/generated/g_gpu_group_nvoc.c 86bb88ccdfa34510d4acf21684e5b8bd32d820b2 - src/nvidia/generated/g_disp_sf_user_nvoc.h 5c0ed2e135f53ca09fbfb542bea88b304a2e1208 - src/nvidia/generated/g_event_buffer_nvoc.c 979082b8c018eee55d880265f7bfd294360816c6 - src/nvidia/generated/g_hda_codec_api_nvoc.c f917323efc9429fcea8643eb9a8d5ee46b1b50a5 - src/nvidia/generated/g_eng_state_nvoc.c 437329a9c6e35e4b02945ec035448e704521280e - src/nvidia/generated/g_hda_codec_api_nvoc.h fba7a2891fe10e837f5897034b8176a7307fbb12 - src/nvidia/generated/g_lock_test_nvoc.h 05269b7e73347b580f11decf0e1b9f467d0cb60c - src/nvidia/generated/g_dce_client_nvoc.h e175ab2ef1fd5b64c9f0d665a26b2ed6f864b106 - src/nvidia/generated/g_vaspace_nvoc.h cc7ec616b034ec01da1c5176b6c62759c3f31a06 - src/nvidia/generated/g_subdevice_nvoc.c 93f9738c0e8aa715592306ddf023adf6b548dcc4 - src/nvidia/generated/g_nvh_state.h 1745f3002758556d1b6d11a24d088ef87ba18bd5 - src/nvidia/generated/g_virt_mem_mgr_nvoc.c 8c9f26e959fa9a6a3c4a5cb8875458cc4a9bfe9e - src/nvidia/generated/g_os_nvoc.c 3b0e038829647cfe0d8807579db33416a420d1d2 - src/nvidia/generated/g_chips2halspec.h a1fad555b8ad36437992afdd6e3e08d236167ac7 - src/nvidia/generated/g_journal_nvoc.h d210a82e3dda39239201cfc1c2fcb2e971915c1e - src/nvidia/generated/g_device_nvoc.h 836f88914b046eadad9435786e1b474ee6690f5f - src/nvidia/generated/g_gpu_nvoc.h ea0d27b0f05818e2e44be7d04b31f8843e1d05b7 - src/nvidia/generated/g_io_vaspace_nvoc.c 10529db24fb0501aa7f2aae25e0a87247ab5405c - src/nvidia/generated/g_resource_nvoc.h 5d47bed309c731bfee4144f61093192e7efcaa55 - src/nvidia/generated/g_disp_channel_nvoc.c 8771d8f2cf58f5e1d91ece01c1962677cebc5e4b - src/nvidia/generated/g_rmconfig_private.h 951c1c8969a621344d4d2a3ec61b1ad51b39ea79 - src/nvidia/generated/g_client_resource_nvoc.c 629b6daac6c9215dc982973b6adcf84314d34d57 - src/nvidia/generated/g_gpu_halspec_nvoc.c 
29d5ccf874298c8156314a6eb23c209f2920b779 - src/nvidia/generated/g_gpu_resource_nvoc.c fc26ab853e7c981c271ced30dfd78d95cd9bcdfd - src/nvidia/generated/g_gpu_db_nvoc.h aa76beb8b33254fae884434b688093f9c7f12c87 - src/nvidia/generated/g_hal_private.h 86739259b5059c9b9ea3061bd8d1846385cb95f4 - src/nvidia/generated/g_sdk-structures.h 41bc858f6aca964a8977ad96911ecf1e8b46385d - src/nvidia/generated/g_hal_archimpl.h f87916eae53dbea2f6bdbe80a0e53ecc2071d9fd - src/nvidia/generated/g_lock_test_nvoc.c 6b8597803d509372152e3915f15139186294add5 - src/nvidia/generated/g_gpu_class_list.c 2101385d1332db9a2902370a6b3c6117ca8b2737 - src/nvidia/generated/g_kern_disp_nvoc.h d71ff42bc0fc0faf1999a6cbe88c4492a47e200e - src/nvidia/generated/g_os_nvoc.h e58abb783f7561d0af925c2fca392c5165fcb199 - src/nvidia/generated/g_kern_disp_nvoc.c d6a34926ab710156c9c4b2d9f12a44e6dafd43d1 - src/nvidia/generated/g_tmr_nvoc.h c4c67b0e0284656b32c7b4547e22d521c442124a - src/nvidia/generated/g_disp_objs_nvoc.h 8e49b4d77641c98c6101dbc88a79290ceca6271a - src/nvidia/generated/g_rs_server_nvoc.h af206c390549eff5d690ad07f3e58cd417f07f5f - src/nvidia/generated/g_hal_register.h be659882e731b6a2019639265af46239c5c96ebf - src/nvidia/generated/g_hal_nvoc.c db76e8669776fbfa901c60d9b9908af2fabc4703 - src/nvidia/generated/g_virt_mem_mgr_nvoc.h 797bd0197236fb0afc2c7e052487db803ac5baf0 - src/nvidia/generated/g_rs_resource_nvoc.c 884bed29fb4735ae0b4504fc874702acd29ee541 - src/nvidia/generated/g_mem_mgr_nvoc.c 3168beb42f15591a50339692d502e04977615a7b - src/nvidia/generated/g_prereq_tracker_nvoc.c 8e0071daaf5471a0fb3856705ec993704eaed4b5 - src/nvidia/generated/g_disp_inst_mem_nvoc.h fb464cf839a1e76ac2a27346c7cd46ca921f1f56 - src/nvidia/generated/g_traceable_nvoc.c 8588d6f88ab5e8682952063fe0e2c840b334c622 - src/nvidia/generated/g_eng_desc_nvoc.h de99523103dd7df0934cbe7aa21179ec7f241817 - src/nvidia/generated/g_os_desc_mem_nvoc.c aa43dd8bdbdc71dc64d65e948221c7d5235588e7 - src/nvidia/generated/g_disp_objs_nvoc.c 9b6cc3a5e9e35139e9245cbe753fe9a552a488c0 - src/nvidia/generated/g_syncpoint_mem_nvoc.h ae311b0968df9e9c9c2cec89e3060c472fc70a4c - src/nvidia/generated/g_mem_nvoc.c dc7a782be9a0096701771cb9b2dc020c2f814e6d - src/nvidia/generated/g_system_nvoc.h 93a47004dd1c7529c6ee5f8abdf8b49c336fb681 - src/nvidia/generated/g_disp_sf_user_nvoc.c 3b5dfad8fccd7251cc177c7ea1b90265b4b6c901 - src/nvidia/generated/g_gpu_mgmt_api_nvoc.h b53ec15a1aaf102d42b79881cd1b270afeb7205c - src/nvidia/generated/g_system_mem_nvoc.h 67b2d3ea81ebe7be679bcafc688ced0d64f16edf - src/nvidia/generated/g_object_nvoc.h b1be7145e70d8811fbdbe07c0e99f32ad0e38429 - src/nvidia/generated/g_client_resource_nvoc.h 0d5b87b117d39b173a2a21a5cd71572bc2b26697 - src/nvidia/generated/g_resource_nvoc.c 51df7972f9932c2a5d800d4e2b3e4828e5aa2038 - src/nvidia/generated/g_vaspace_nvoc.c 0820fa0a975b2474ce0fdf64508cbd7758f60e5c - src/nvidia/generated/g_ref_count_nvoc.h fff3ebc8527b34f8c463daad4d20ee5e33321344 - src/nvidia/inc/lib/ref_count.h ec26741397ebd68078e8b5e34da3b3c889681b70 - src/nvidia/inc/lib/base_utils.h f8d9eb5f6a6883de962b63b4b7de35c01b20182f - src/nvidia/inc/lib/protobuf/prb.h 601edb7333b87349d791d430f1cac84fb6fbb919 - src/nvidia/inc/lib/zlib/inflate.h 671c628ff9d4e8075f953766adcab9bfc54bd67c - src/nvidia/inc/libraries/poolalloc.h 1e8730e4abd210e3c648ef999ccc2b1f1839b94c - src/nvidia/inc/libraries/field_desc.h 8dd7f2d9956278ed036bbc288bff4dde86a9b509 - src/nvidia/inc/libraries/eventbufferproducer.h 1b28bd0ee2e560ca2854a73a3ee5fb1cf713d013 - src/nvidia/inc/libraries/nvoc/utility.h 
d3cd73c0c97a291e76e28a6e3834d666e6452172 - src/nvidia/inc/libraries/nvoc/prelude.h 79b556739f0648cec938f281794663433fc5e048 - src/nvidia/inc/libraries/nvoc/runtime.h 91c67f272f0ada6f386e9f4a78fbde70aa5c883d - src/nvidia/inc/libraries/nvoc/object.h c0f66cf7b2fb6ca24b5d4badede9dcac0e3b8311 - src/nvidia/inc/libraries/nvoc/rtti.h a3db778e81f7188a700e008e4c5f5b1320ab811e - src/nvidia/inc/libraries/mmu/gmmu_fmt.h 1daea206ab581fa3554ff1811e1253a7d0053ac0 - src/nvidia/inc/libraries/mmu/mmu_fmt.h 56b8bae7756ed36d0831f76f95033f74eaab01db - src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h b8e52b576e6668e4de7ea65a31e12c2bb491a591 - src/nvidia/inc/libraries/mapping_reuse/mapping_reuse.h e772583f7fbf994fcf923d527d42372a716b4c57 - src/nvidia/inc/libraries/ioaccess/ioaccess.h 26853c886d848fb88e14da3aceab23f90589c05d - src/nvidia/inc/libraries/utils/nvprintf_level.h c314121149d3b28e58a62e2ccf81bf6904d1e4bc - src/nvidia/inc/libraries/utils/nvmacro.h 72dcc09b77608263573bd34adf09393328eddf86 - src/nvidia/inc/libraries/utils/nvrange.h b598ccd2721892b6915d4be432f1fc332477b666 - src/nvidia/inc/libraries/utils/nvbitvector.h 9aa5870d052a45c2489a6ea1a4f2e30fbc52d6be - src/nvidia/inc/libraries/utils/nv_enum.h 4849eb6c567e3ba952c22e702461c1a84ec88c6a - src/nvidia/inc/libraries/utils/nvprintf.h 1b265cb4fcc628862e4b27ae63a897871987eb76 - src/nvidia/inc/libraries/utils/nvassert.h 39113db75fdab5a42f9d8653ed1c90018b8b1df4 - src/nvidia/inc/libraries/containers/map.h 11ce1423312f4c34df19672e45678d0531cc299d - src/nvidia/inc/libraries/containers/ringbuf.h 5f116730f8b7a46e9875850e9b6ffb2a908ad6c2 - src/nvidia/inc/libraries/containers/btree.h fc211c8276ebcee194080140b5f3c30fba3dfe49 - src/nvidia/inc/libraries/containers/queue.h 661b551f4795f076d7d4c4dab8a2ae2f52b0af06 - src/nvidia/inc/libraries/containers/list.h 47c69b04f95664e742f1a0a02711eeb1fb71000b - src/nvidia/inc/libraries/containers/eheap_old.h 5da20ecad3ff8405dea782792c6397d21ba76f7c - src/nvidia/inc/libraries/containers/vector.h bcfc41a04576a4244c9dc3fe2a32c8e582f16c3e - src/nvidia/inc/libraries/containers/type_safety.h 5cabf8b70c3bb188022db16f6ff96bcae7d7fe21 - src/nvidia/inc/libraries/containers/multimap.h 4e26106c9c758c9e48418451ac01cf591ed74a31 - src/nvidia/inc/libraries/nvlog/nvlog_printf.h 41843197a5c11abc93df89b8f10a5f815e7fe6af - src/nvidia/inc/libraries/nvlog/nvlog.h 13aedc8ccf6acdd71be71b2219f79cd1af411273 - src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h 7c9c9456aaacbeffa11a9af54fe2250095ebbb00 - src/nvidia/inc/libraries/tls/tls.h 87a130551593551380ac3e408f8044cc0423c01a - src/nvidia/inc/libraries/nvport/nvport.h 2487ffc1eb1e50b27ba07e0581da543d80bdaa72 - src/nvidia/inc/libraries/nvport/safe.h 4bf45849bc1c6b89d7a79d761cce84a1d5026eac - src/nvidia/inc/libraries/nvport/debug.h 147d47ef4bd860394d1d8ae82c68d97887e2898b - src/nvidia/inc/libraries/nvport/core.h 6d698ca4fc5e48c525f214a57e1de0cc4aa9e36b - src/nvidia/inc/libraries/nvport/thread.h 6065fa9a525d80f9b61acb19e476066823df0700 - src/nvidia/inc/libraries/nvport/sync.h a1d93b6ec8ff01a3c2651e772a826ee11a7781d7 - src/nvidia/inc/libraries/nvport/util.h fb5a011275328b7c1edc55abc62e604462b37673 - src/nvidia/inc/libraries/nvport/atomic.h 16a35b2b6fd6eb855acd64d72480b285795f54b2 - src/nvidia/inc/libraries/nvport/memory.h f31ed19d0588861b8c2b1489dd4e70d430110db5 - src/nvidia/inc/libraries/nvport/crypto.h 96c7c30c9f6503675f0903a16207a0ac06a6963d - src/nvidia/inc/libraries/nvport/cpu.h 53d843988669f61528cd45099ced749defa4cf7e - src/nvidia/inc/libraries/nvport/string.h 
d1863efe7b8a63f1c5a7f47856b95ad31fd1a561 - src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h 9596b274389ea56acff6ca81db8201f41f2dd39d - src/nvidia/inc/libraries/nvport/inline/atomic_clang.h a8c9b83169aceb5f97d9f7a411db449496dc18f6 - src/nvidia/inc/libraries/nvport/inline/util_generic.h bbece45965ffbc85fbd383a8a7c30890c6074b21 - src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h a7cb79bf7ac48e0f5642ecfd2e430bb85587dddf - src/nvidia/inc/libraries/nvport/inline/memory_tracking.h 1d6a239ed6c8dab1397f056a81ff456141ec7f9c - src/nvidia/inc/libraries/nvport/inline/util_valist.h f267235fd8690e1b1d7485d3a815841607683671 - src/nvidia/inc/libraries/nvport/inline/safe_generic.h 645734ed505a4d977490e54b26cdf49657e20506 - src/nvidia/inc/libraries/nvport/inline/sync_tracking.h a902e0f4265bd3dbd251afefa8ceb0389464d886 - src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h 2dec1c73507f66736674d203cc4a00813ccb11bc - src/nvidia/inc/libraries/resserv/rs_domain.h fa5a5d8fa07cae6b8ef9d9135dc5d7e7624533d2 - src/nvidia/inc/libraries/resserv/resserv.h 972165721958839bc1d510fda9409d35ff89ec21 - src/nvidia/inc/libraries/resserv/rs_server.h 883bf7295d707014278e035f670d151275975d18 - src/nvidia/inc/libraries/resserv/rs_resource.h 2ad85ddca7cd230cea917e249871277ef1e59db1 - src/nvidia/inc/libraries/resserv/rs_client.h cd033fe116a41285a979e629a2ee7b11ec99369f - src/nvidia/inc/libraries/resserv/rs_access_rights.h df174d6b4f718ef699ca6f38c16aaeffa111ad3c - src/nvidia/inc/libraries/resserv/rs_access_map.h 5fd1da24ae8263c43dc5dada4702564b6f0ca3d9 - src/nvidia/inc/os/dce_rm_client_ipc.h 4aa45a3755ef172aa35279e87dd5cd83cab1bc2e - src/nvidia/inc/kernel/vgpu/rpc_hal_stubs.h f2fd94a00e5debf1dc7f7ad4c00d417552fb0554 - src/nvidia/inc/kernel/vgpu/rpc.h 37598b6c25aac1a07cbc2bc5c76ebecdbca56eb6 - src/nvidia/inc/kernel/vgpu/rm_plugin_shared_code.h fea4bbeb739723d3b80b5b3d8943e746e58fae07 - src/nvidia/inc/kernel/vgpu/dev_vgpu.h f64d3723d0c475558bed799da8d2c5ec32a7d3a8 - src/nvidia/inc/kernel/vgpu/vgpuapi.h 8bf8282ce6112a2afb2e7f64d138d6ce90cf37c0 - src/nvidia/inc/kernel/vgpu/rpc_global_enums.h 69360faa428e157580fac445bcf601f44f7646c0 - src/nvidia/inc/kernel/vgpu/rpc_headers.h b9af629ab29b527f7830b78f52b55b8535b8dbfd - src/nvidia/inc/kernel/vgpu/vgpu_util.h e33b5b8c324c23d28e91324a87b47a24823dc5f5 - src/nvidia/inc/kernel/vgpu/rpc_vgpu.h af9d17b204fdddc6f97280fdafd5a414ee8274dc - src/nvidia/inc/kernel/diagnostics/code_coverage_mgr.h c6efd51b8b8447829a0867cd7fb7a5a5a2fb1e3d - src/nvidia/inc/kernel/diagnostics/traceable.h fd780f85cb1cd0fd3914fa31d1bd4933437b791d - src/nvidia/inc/kernel/diagnostics/tracer.h 7e75b5d99376fba058b31996d49449f8fe62d3f0 - src/nvidia/inc/kernel/diagnostics/profiler.h 7615ac3a83d0ad23b2160ff8ad90bec9eb1f3c6c - src/nvidia/inc/kernel/diagnostics/journal.h b259f23312abe56d34a8f0da36ef549ef60ba5b0 - src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h 7f3f19ed69089ba05f5cac44982547718dbf4662 - src/nvidia/inc/kernel/diagnostics/xid_context.h 3a28bf1692efb34d2161907c3781401951cc2d4f - src/nvidia/inc/kernel/diagnostics/journal_structs.h 8ef620afdf720259cead00d20fae73d31e59c2f7 - src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h 701375e96d771b4105f5fe4949ed4a542be4f3d7 - src/nvidia/inc/kernel/os/os_stub.h 408c0340350b813c3cba17fd36171075e156df72 - src/nvidia/inc/kernel/os/os.h c8496199cd808ed4c79d8e149961e721ad96714e - src/nvidia/inc/kernel/os/capability.h cda75171ca7d8bf920aab6d56ef9aadec16fd15d - src/nvidia/inc/kernel/os/nv_memory_type.h 70b67003fda6bdb8a01fa1e41c3b0e25136a856c - 
src/nvidia/inc/kernel/os/nv_memory_area.h 497492340cea19a93b62da69ca2000b811c8f5d6 - src/nvidia/inc/kernel/rmapi/event_buffer.h 499c3d0d76276ee9441d57948ea97877c48b1daa - src/nvidia/inc/kernel/rmapi/rmapi.h b4bae9ea958b4d014908459e08c93319784c47dd - src/nvidia/inc/kernel/rmapi/event.h 0500c41247fdecd66f25428d279c6dab72bab13e - src/nvidia/inc/kernel/rmapi/binary_api.h 61e3704cd51161c9804cb168d5ce4553b7311973 - src/nvidia/inc/kernel/rmapi/resource.h 2baec15f4c68a9c59dd107a0db288e39914e6737 - src/nvidia/inc/kernel/rmapi/client.h ac9288d75555180c1d5dd6dd7e0e11fb57a967f2 - src/nvidia/inc/kernel/rmapi/exports.h 835f193521f216d29c678a6018cd9791914b6c01 - src/nvidia/inc/kernel/rmapi/lock_stress.h b9ff9b201bf2df8651f0c408158aa617638868f6 - src/nvidia/inc/kernel/rmapi/rmapi_specific.h 20adc296ffe79f27d5c24c70716c972a2e0c9a5d - src/nvidia/inc/kernel/rmapi/control.h deed1715907c1dab8e3304bd4f63b688b72104b7 - src/nvidia/inc/kernel/rmapi/mapping_list.h 4453fe6463e3155063f2bdbf36f44697606a80a5 - src/nvidia/inc/kernel/rmapi/client_resource.h 6cc2de07b21fb21cef1b5b87fb2f1c935782262c - src/nvidia/inc/kernel/rmapi/rs_utils.h 35a65c31c6dcc2824011245ff6e2d5a30f95525c - src/nvidia/inc/kernel/rmapi/rmapi_utils.h a92dbf2870fe0df245ea8967f2f6a68f5075ecaf - src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h 23e243f9abcb2a4f2d10d141303cd55677b04436 - src/nvidia/inc/kernel/rmapi/rmapi_cache_handlers.h 2724476b61b1790f1b7c293cc86e8a268125e11c - src/nvidia/inc/kernel/rmapi/param_copy.h 15f788614e08d805e963653460858cf013fe0178 - src/nvidia/inc/kernel/rmapi/lock_test.h 2b23f2dbd8f3f63a17a1b63ebb40a2fd7fd8801a - src/nvidia/inc/kernel/rmapi/alloc_size.h 893ec596aab365c2ff393bf2b96aea57f37d01f8 - src/nvidia/inc/kernel/platform/nvpcf.h 5e9928552086947b10092792db4a8c4c57a84adf - src/nvidia/inc/kernel/platform/acpi_common.h e762205698aff945603324331b443bb2f20cf778 - src/nvidia/inc/kernel/platform/sli/sli.h 15754215ec49815f547dd999b2262a34670dde0b - src/nvidia/inc/kernel/core/locks.h bdc4ab675c6f6c4bd77c3aaf08aa5c865b186802 - src/nvidia/inc/kernel/core/hal.h ad378b09a277fba0efd3291d167e1d21071bdf1b - src/nvidia/inc/kernel/core/printf.h a054be86a4476ba7b9a97052dfcfa4155e059cb9 - src/nvidia/inc/kernel/core/info_block.h bffae4da6a1f9b7dc7c879587fd674b49b46dac1 - src/nvidia/inc/kernel/core/core.h 37f267155ddfc3db38f110dbb0397f0463d055ff - src/nvidia/inc/kernel/core/strict.h b00302aec7e4f4e3b89a2f699f8b1f18fc17b1ba - src/nvidia/inc/kernel/core/hal_mgr.h 2d741243a6ae800052ddd478cc6aa7ad0b18f112 - src/nvidia/inc/kernel/core/prelude.h ebc7c06d9e94218af4cf6b0c03e83650e391e5bc - src/nvidia/inc/kernel/core/thread_state.h b5859c7862fb3eeb266f7213845885789801194a - src/nvidia/inc/kernel/core/system.h 07f45cd5fab5814e21b9e84425564b43776118fd - src/nvidia/inc/kernel/gpu/gpu_resource_desc.h 7010ff346c27b6453c091f5577672b8b1821808d - src/nvidia/inc/kernel/gpu/gpu_access.h 10ba0b9d4c67c8027b391073dab8dc4388f32fd7 - src/nvidia/inc/kernel/gpu/nvbitmask.h 59f72837997cb0c8ffc491d9a61c61e61b9dca94 - src/nvidia/inc/kernel/gpu/gpu_shared_data_map.h bca121fb72d54afd714654f1a50eb7192da3135f - src/nvidia/inc/kernel/gpu/gpu_uuid.h 3f0f23a15201105779f3d25dc7628b42990c4b7e - src/nvidia/inc/kernel/gpu/gpu_timeout.h 1ac9c8bf155d1f25f790032b2b6306223199d9ff - src/nvidia/inc/kernel/gpu/gpu_arch.h f17b704f2489ffedcc057d4a6da77c42ece42923 - src/nvidia/inc/kernel/gpu/gpu_resource.h 28d0d82b58ef13662e8896d3bbc42d340836294e - src/nvidia/inc/kernel/gpu/gpu_user_shared_data.h e33e4d1537839e41898ff0fab8949e90ee1aed46 - src/nvidia/inc/kernel/gpu/gpu_device_mapping.h 
426c6ab6cecc3b1ba540b01309d1603301a86db1 - src/nvidia/inc/kernel/gpu/eng_desc.h 5f5677bee452c64a1b890c3eb65e81fda66ddbaa - src/nvidia/inc/kernel/gpu/error_cont.h d624e0c45cc8ad24e8c0b2fb5281c0c8a1c7a6d3 - src/nvidia/inc/kernel/gpu/gpu_engine_type.h c33ab6494c9423c327707fce2bcb771328984a3c - src/nvidia/inc/kernel/gpu/gpu_halspec.h 145b1bc37e6c36b466ea33dd0579d22b530d8dd3 - src/nvidia/inc/kernel/gpu/kern_gpu_power.h c771936af1de030194894db1312d847038ddb0cb - src/nvidia/inc/kernel/gpu/gpu_child_list.h 0e8353854e837f0ef0fbf0d5ff5d7a25aa1eef7c - src/nvidia/inc/kernel/gpu/eng_state.h 76b24227c65570898c19e16bf35b2cad143f3d05 - src/nvidia/inc/kernel/gpu/gpu.h 0a0c9a8f27feec3e90e15ce9879532ec77450de5 - src/nvidia/inc/kernel/gpu/gpu_acpi_data.h 9ed922ffed4454a10c5e2d8b3123ed653ec653e4 - src/nvidia/inc/kernel/gpu/gpu_ecc.h f2947fefcaf0611cd80c2c88ce3fdea70953c1ed - src/nvidia/inc/kernel/gpu/gpu_child_class_defs.h efc50bb2ff6ccf1b7715fd413ca680034920758e - src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h 24d01769b39a6dd62574a95fad64443b05872151 - src/nvidia/inc/kernel/gpu/subdevice/subdevice.h 576216219d27aa887beeccefc22bcead4d1234d7 - src/nvidia/inc/kernel/gpu/disp/kern_disp.h 277a2719f8c063037c6a9ed55ade2b1cb17f48ae - src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h 51a209575d3e3fe8feb7269ece7df0846e18ca2a - src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h d0899f0e55e6675e267d4c72577be52e39b66121 - src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h be7da8d1106ee14ff808d86abffb86794299b2df - src/nvidia/inc/kernel/gpu/disp/disp_objs.h 74bc902cd00b17da3a1dfa7fd3ebc058de439b76 - src/nvidia/inc/kernel/gpu/disp/disp_channel.h b39826404d84e0850aa3385691d8dde6e30d70d4 - src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h 24397d051c941427e54cefc1062d8cd977a8725e - src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h 9a33a37c6cea9bad513aa14c942c689f28f7c0d8 - src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h 5179f01acf7e9e251552dc17c0dcd84f7d341d82 - src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h 22fc153d91a3917ac8e3f2aa94f0d52bfb11f7c2 - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_commands_responses.h 173e9ecd2224a5259c79f2491302ba4415e82f70 - src/nvidia/inc/kernel/gpu/hfrp/kernel_hfrp.h 3118f2e9b47cfac98a92d195ce67ea63e50bf3ab - src/nvidia/inc/kernel/gpu/hfrp/kern_hfrp_common.h 1feab39692ea8796ac7675f4780dfd51e6e16326 - src/nvidia/inc/kernel/gpu/timer/objtmr.h 0cff83f4fdcc8d025cd68e0a12faaeead09fa03b - src/nvidia/inc/kernel/gpu/timer/tmr.h 71dd4fccd3b601508230a2b8b720aaf531a160ff - src/nvidia/inc/kernel/gpu/gsp/gsp_trace_rats_macro.h e1979c71f3d5ffc92bf2306f9360b70bca0edf1f - src/nvidia/inc/kernel/gpu/gsp/message_queue.h 23d38dc3e66affac9342a839f5ba0d79a40f63ba - src/nvidia/inc/kernel/gpu/gsp/kernel_gsp_trace_rats.h bb9b8ec9840109b15c174da02e7ac85c1e2c0c70 - src/nvidia/inc/kernel/gpu/rpc/objrpc.h 1cc21ad9136024f7437ef745db6652343588c50a - src/nvidia/inc/kernel/gpu/rpc/objrpcstructurecopy.h 7b7cf3b6459711065d1b849bf5acaea10b6400ca - src/nvidia/inc/kernel/gpu/intr/intr_common.h 1e3bebe46b7f2f542eedace554a4156b3afb51f1 - src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h 97d0a067e89251672f191788abe81cf26dcb335f - src/nvidia/inc/kernel/gpu/device/device.h 889ba18a43cc2b5c5e970a90ddcb770ce873b785 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h 6756126ddd616d6393037bebf371fceacaf3a9f1 - src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h e4c67260b5cb693d695ad3d8aa96aaed45688322 - src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h 20416f7239833dcaa743bbf988702610e9251289 - 
src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h 407cad27681bde8235305464150e275a4a93b5d5 - src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h 5be45f3abdbb65a8eea959d98499ea8ff9a79de9 - src/nvidia/inc/kernel/gpu/mem_mgr/rm_page_size.h 76de30ac7b722cc5d59fc834d6b9c795ec14d7a5 - src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h ce4e0f7177f46f4fc507a68b635e5395a3f7dde6 - src/nvidia/inc/kernel/gpu/dce_client/dce_client.h 2c48d7335bdb0b7ea88b78216c0aeab2e11e00c1 - src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h 5b151d0d97b83c9fb76b76c476947f9e15e774ad - src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h e188d9f2d042ffe029b96d8fbb16c79a0fc0fb01 - src/nvidia/inc/kernel/gpu_mgr/gpu_db.h ea32018e3464bb1ac792e39227badf482fa2dc67 - src/nvidia/inc/kernel/gpu_mgr/gpu_group.h 02d6a37ef1bb057604cb98a905fa02429f200c96 - src/nvidia/inc/kernel/mem_mgr/mem.h a5f49a031db4171228a27482d091283e84632ace - src/nvidia/inc/kernel/mem_mgr/system_mem.h d15991bc770c5ab41fe746995294c5213efa056b - src/nvidia/inc/kernel/mem_mgr/io_vaspace.h 5ae08b2077506cbc41e40e1b3672e615ce9d910f - src/nvidia/inc/kernel/mem_mgr/vaspace.h 0ce5d6370c086d2944b2e8d31ff72a510d98dc8f - src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h 4c386104eaead66c66df11258c3f1182b46e96ee - src/nvidia/inc/kernel/mem_mgr/syncpoint_mem.h 1a08e83fd6f0a072d6887c60c529e29211bcd007 - src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h 2d4afabd63699feec3aea5e89601db009fc51a08 - src/nvidia/inc/kernel/mem_mgr/standard_mem.h 24928c8b4e8b238f1921a1699f3af59bcff994ed - src/nvidia/src/lib/base_utils.c a6134d6f5f3e3b0b4c274eb3b2d0a146644c842b - src/nvidia/src/lib/zlib/inflate.c 2e57601af217d0d8c4986abb593e8864e53e7e0b - src/nvidia/src/libraries/nvoc/src/runtime.c 9ea8bf51c44e500c9963a12a1e2a71ebffe6c4e8 - src/nvidia/src/libraries/nvbitvector/nvbitvector.c 0e7a9b9c697f260438ca5fda8527b0f4edc2de13 - src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c e5ead344020dfc973ee7c7383e0f687a29642683 - src/nvidia/src/libraries/mapping_reuse/mapping_reuse.c 3c885d2c0e6cfb3f8585bddcba128b02e0196167 - src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c ee7ea17829dfbbf9e6cd8d6c6fb2ada086b5d36e - src/nvidia/src/libraries/ioaccess/ioaccess.c ca2ba7f19b705e39dbb8890a84ce84d34fbd8aa4 - src/nvidia/src/libraries/utils/nvassert.c 864bd314450490b687a652335a44fb407835152c - src/nvidia/src/libraries/containers/ringbuf.c eb919a9e8711830c1c3f7fe71273e0a39862292e - src/nvidia/src/libraries/containers/vector.c 53aa343682f721f57058c7a17b1e872ca6fe7cea - src/nvidia/src/libraries/containers/map.c 7f58f03ec069ad5f5c64fedf4a484cc93473bd04 - src/nvidia/src/libraries/containers/queue.c 23c328fc27ad0317efe6ccd2da71cfd9db9da236 - src/nvidia/src/libraries/containers/multimap.c ae669a466f1fecf67746a9fafc8c1119294c93d7 - src/nvidia/src/libraries/containers/list.c 9c80df385a47834da4f92dc11053ca40a37a7fe7 - src/nvidia/src/libraries/containers/btree/btree.c a0e23ad69d805a7de439f0fbf79241c6466efdc2 - src/nvidia/src/libraries/containers/eheap/eheap_old.c cccb1fedee02a240692688090e00ac1e289dec9e - src/nvidia/src/libraries/tls/tls.c a045a19d750d48387640ab659bb30f724c34b8c8 - src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c d047abe66dd8a459c15224cc056fc6f2176b0c6a - src/nvidia/src/libraries/nvport/util/util_gcc_clang.c f0c486c1ad0f7d9516b13a02d52b4d857d8865b1 - src/nvidia/src/libraries/nvport/util/util_compiler_switch.c 9b69fbf3efea6ba58f9ba7cb0189c9264c994657 - src/nvidia/src/libraries/nvport/sync/sync_common.h eb8b5fcab51c47f58a37958ddb38ff90991bcbbe - src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c 
b2ae1406c94779f575d3e2233a7ab248ac10e74f - src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h e2fec1a305dfec07456faec8ea5e75f601d76b5e - src/nvidia/src/libraries/nvport/memory/memory_tracking.c c5a16e5bb7d304ffe5e83d7b27226cbecdbc7ce1 - src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c db01179ad5e6333844bd3e31b62d0dc262c98875 - src/nvidia/src/libraries/nvport/memory/memory_generic.h 2c00bd224d17c0cc5469b5140f3be3d23b494922 - src/nvidia/src/libraries/nvport/string/string_generic.c b387005657f81538fab5962d4aabbc5dc681aa1b - src/nvidia/src/libraries/nvport/core/core.c 702c73446bba35f88249cfe609ac0ca39dbd80ff - src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c 9ca28a5af5663dec54b4cd35f48a8a3d8e52e25f - src/nvidia/src/libraries/nvport/cpu/cpu_common.c a305654bafc883ad28a134a04e83bbd409e0fc06 - src/nvidia/src/libraries/nvport/cpu/cpu_common.h 099c17e5931d5d881d8248ec68041fa0bbc2a9bc - src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c 1f2e9d09e658474b36d0b0ecd9380d0d2bcc86b2 - src/nvidia/src/libraries/resserv/src/rs_domain.c f9cb28c60e7063ddb5b2a2af4a053a477c95c74b - src/nvidia/src/libraries/resserv/src/rs_server.c dac54d97b38ad722198ec918668f175dc5122e4e - src/nvidia/src/libraries/resserv/src/rs_access_map.c ede517ff5f53666a23ad2edec7e9fcd85c6ef7d1 - src/nvidia/src/libraries/resserv/src/rs_client.c 26d872a8495e38065af34aed9a60ab9a08898d40 - src/nvidia/src/libraries/resserv/src/rs_resource.c 408e1e5430e5e507e7e59adc292175150e50b825 - src/nvidia/src/libraries/resserv/src/rs_access_rights.c 304e2fb9bbf6d37358779d4e321f33ac76efcd39 - src/nvidia/src/kernel/diagnostics/nvlog.c b3a29311cc22e2dae686f8ed2df6bc828aa826cf - src/nvidia/src/kernel/diagnostics/profiler.c 439543a41a36b0959b5f4c099f4adaa379b9f912 - src/nvidia/src/kernel/diagnostics/code_coverage_mgr.c c1e5733847085bede6eb128eff3bad14549a31db - src/nvidia/src/kernel/diagnostics/nvlog_printf.c d10c5031c3bc00ae1243729c39496df38d2c9ae3 - src/nvidia/src/kernel/os/os_init.c 2255d1ae2d942c3fed9a4b0a41020d0e49cb8648 - src/nvidia/src/kernel/os/os_timer.c b887b661ffbe6c223c60f544b1fab32690cd8c75 - src/nvidia/src/kernel/os/os_sanity.c f228bc86fd9149675cb554d6f596d81fdd4c3770 - src/nvidia/src/kernel/os/os_stubs.c 8800bf3ec679a1c3d36b89992b3f2f95365ec834 - src/nvidia/src/kernel/rmapi/entry_points.c 348c34e13f006f1320536876cb7393d8232e61de - src/nvidia/src/kernel/rmapi/rpc_common.c 8f033323f3ae264a79f779abb163442deb17e88a - src/nvidia/src/kernel/rmapi/rmapi.c bc7c0b5bd06a1c58714b782d85f740632c6e152f - src/nvidia/src/kernel/rmapi/rmapi_cache_handlers.c ac6a5b3adf15eac4a7bd9ae24981f6f5fc727097 - src/nvidia/src/kernel/rmapi/deprecated_context.h b1e57ee17d6641412a4065317be3b81e5db94824 - src/nvidia/src/kernel/rmapi/event_notification.c a965c5f028c1d47d7da0dd03dabbf8aebc817523 - src/nvidia/src/kernel/rmapi/rs_utils.c a2ad052692006f70e97fd3d186f19c7ddfe80c4c - src/nvidia/src/kernel/rmapi/deprecated_context.c 7a0a8914b407f836627d8262de2de6cab2dd691d - src/nvidia/src/kernel/rmapi/rmapi_specific.c d915b65380b59e557e5043f839c42d4105caa111 - src/nvidia/src/kernel/rmapi/rmapi_utils.c 2c5b12d5eb17c313138262cd1e42eb940a4d9ed8 - src/nvidia/src/kernel/rmapi/client.c ab24efdee819d113fe72ec12c0e359c514151336 - src/nvidia/src/kernel/rmapi/resource_desc_flags.h 1745523e56fc0ff5a45d4b2473e13f0cc6f2afb1 - src/nvidia/src/kernel/rmapi/event_buffer.c f70b6d7e8f21bf26d9c8171d62cbdf934fe3a30e - src/nvidia/src/kernel/rmapi/rmapi_stubs.c 09fc97bd7daa74a0b2e55fc5632b2f25464412dc - src/nvidia/src/kernel/rmapi/client_resource.c 
c21223701bd7afd09e706616105f3f5f365afa5d - src/nvidia/src/kernel/rmapi/rmapi_finn.c 433c6091b3b986151e27ea952cef1dc83ff3095c - src/nvidia/src/kernel/rmapi/lock_test.c 682977753c878ccee6279e539cf11bee2b548752 - src/nvidia/src/kernel/rmapi/resource_desc.c 6dc3f6642c450043cc9b361037f4cb2091e7cb58 - src/nvidia/src/kernel/rmapi/sharing.c 00a6ef509ed8484d038c54b47642bc1a00125077 - src/nvidia/src/kernel/rmapi/lock_stress.c 3b53d6b8ef183702327b4bc3a96aa06f67475ddc - src/nvidia/src/kernel/rmapi/param_copy.c 1c9b26108c6b7f27c5f4fe84e10d83cfb32c9b5b - src/nvidia/src/kernel/rmapi/resource_list.h 3b9809740d88ab4b5b9c9d1adbd3ec304f6f6c7e - src/nvidia/src/kernel/rmapi/resource.c 41c397e2cc8c8b1c9c734c435d2d4c17cf709e63 - src/nvidia/src/kernel/rmapi/mapping_cpu.c 58ed3486109a54829f1afdf214c15529eaed678b - src/nvidia/src/kernel/rmapi/mapping.c 0172aa3770ca55bbfbd5e66f48f4e4820a4d5576 - src/nvidia/src/kernel/rmapi/event.c e26021985ccfa2fb94c96310d9700df405817889 - src/nvidia/src/kernel/rmapi/control.c 6ee3cc915f68b5b70274eec219b7fd6799479459 - src/nvidia/src/kernel/rmapi/rmapi_cache.c 7a4abc27bdbcbb758545783f4182f200587ae3bd - src/nvidia/src/kernel/rmapi/binary_api.c f821719c449e0300a3c27ebeaa3f4d6791ddaf60 - src/nvidia/src/kernel/rmapi/alloc_free.c b7561ece996380512992736f947ddea0ba7f075e - src/nvidia/src/kernel/rmapi/resource_desc.h 72a6ae5bcae8eb4197047aaa5c1780b689544c87 - src/nvidia/src/kernel/rmapi/entry_points.h 4fbbb955e617d7b014e201a5263915939c87f884 - src/nvidia/src/kernel/rmapi/resource_list_required_includes.h a16bffcad38862470b4424fa9a1b0d4013304600 - src/nvidia/src/kernel/core/hal_mgr.c 4d3f32dbc4cbe3d4d1301079eaf21005f74dea90 - src/nvidia/src/kernel/core/locks_common.c e7195ca43692b6fbf6a3533437650c596cee88db - src/nvidia/src/kernel/core/locks_minimal.c ee0bf4f81d33e9a7b6bbb2be27bb3973c8cb5b18 - src/nvidia/src/kernel/core/system.c 905a0f08067503374c757ed34d1ea87379ab4a71 - src/nvidia/src/kernel/core/thread_state.c afa03f17393b28b9fc791bf09c4d35833447808d - src/nvidia/src/kernel/core/hal/hal.c d3922085d63a7edf02b582fe0b6e3acba6124c25 - src/nvidia/src/kernel/core/hal/hals_all.c 8eac3ea49f9a53063f7106211e5236372d87bdaf - src/nvidia/src/kernel/core/hal/info_block.c 1f258d22d361a8902c27a4329e553a73b3fbe6e9 - src/nvidia/src/kernel/gpu/device.c f520afc43afd9e40f779d2bdf3acc48ff7419625 - src/nvidia/src/kernel/gpu/eng_state.c 7ed54a614b756e32a61366d2009db26d1ef5fcc4 - src/nvidia/src/kernel/gpu/gpu_arch.c 1b2a50c873087a28cc4edd4a65945bcafc84bcf0 - src/nvidia/src/kernel/gpu/gpu_uuid.c 5bbac8b7323fe7f048e54b2ebc3ebe4f30655181 - src/nvidia/src/kernel/gpu/gpu.c c7f5b73c217a181f5ff28886bf691ec7d528cb86 - src/nvidia/src/kernel/gpu/gpu_resource.c 2408846a2a5c24a102df13919f384c6675f56f29 - src/nvidia/src/kernel/gpu/device_ctrl.c 2b40a86a112c7643a69b094194c2ee1dd294f16a - src/nvidia/src/kernel/gpu/gpu_gspclient.c 261a5b014b3869c3ce5e830cf8b9529fa0b8a09d - src/nvidia/src/kernel/gpu/gpu_resource_desc.c 4e1be780ac696a61f056933e5550040a2d42c6bd - src/nvidia/src/kernel/gpu/gpu_device_mapping.c 57941830e179d534a7329608658c82fd91ff4a57 - src/nvidia/src/kernel/gpu/gpu_timeout.c 89a6229720a7d5276d73ad51a210ce6f60cedb08 - src/nvidia/src/kernel/gpu/gpu_user_shared_data.c bc508781e640dbf756d9c9e43e75227d05b413c7 - src/nvidia/src/kernel/gpu/device_share.c 84c2c6a59313d36aa70c8a01cfedf1d1e7a3d931 - src/nvidia/src/kernel/gpu/gpu_access.c d0d744c416a52404a52c35ede015629990934003 - src/nvidia/src/kernel/gpu/gpu_engine_type.c 12c1f9494317c34b1b9bfcc58bf7bee81b08c98e - src/nvidia/src/kernel/gpu/gpu_t234d_kernel.c 
ea626b20043182e3b374cb05d02c75b482fcd3a3 - src/nvidia/src/kernel/gpu/gpu_rmapi.c 099da8d641fb4481f9a4c625588dd4aa4ce20bcd - src/nvidia/src/kernel/gpu/subdevice/subdevice.c 6fab19f1f68bdb8d2b969efc6f030e2066bc6b5e - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c b4e503b320119fecdb22dfda1268ce31e1a7ecd7 - src/nvidia/src/kernel/gpu/subdevice/generic_engine.c 9afe5cedd5e7d535ee56f4f5b3cc549f154d8be2 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c 796d1368584a9318a39ed313dcb86bbcca40ad83 - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c 4c363a34fe12b9bb0d428c3d90974d7085d0366f - src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_internal_kernel.c fcf79cf10019193a9e57f8d19b5a37bac6120365 - src/nvidia/src/kernel/gpu/arch/t25x/kern_gpu_t256d.c 095d4a87b067038bd2d80a1c4b2d9407810b0e66 - src/nvidia/src/kernel/gpu/arch/t26x/kern_gpu_t264d.c c20ed8bd9fda88b036c6ff677b7c25ebd171434f - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_arch_t234d.c b09af17437a01e63e960414a4534074da240dc59 - src/nvidia/src/kernel/gpu/arch/t23x/kern_gpu_t234d.c ceb516c8064e1df2d18897f98f5c8ea58e907973 - src/nvidia/src/kernel/gpu/disp/disp_capabilities.c c67baeb5df33080d99f322786759fc3f5436301d - src/nvidia/src/kernel/gpu/disp/disp_channel.c 8fafebf746bfcde2c53435be386a8a0846973b0c - src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c 6437dd659a38c62cd81fb59f229bd94e59f37e71 - src/nvidia/src/kernel/gpu/disp/disp_sf_user.c 0fbfb9dd91147f04bea1060788efc1121078c159 - src/nvidia/src/kernel/gpu/disp/kern_disp.c 5aa67b54fcd16f648d7a72b9c2c4ff3fb6d3a5be - src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c 56027ec220553e1febe42f37fd70757cbb034dcb - src/nvidia/src/kernel/gpu/disp/disp_objs.c b95080033ecc8736a0cdf9476cec7563c4a2af0f - src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c caba45a10f43e7817f491e7856ef30dd49782f6e - src/nvidia/src/kernel/gpu/disp/head/kernel_head.c f59763139d9993ae545ded8057706cc4d65afc0c - src/nvidia/src/kernel/gpu/disp/head/arch/v04/kernel_head_0401.c eb00ffa5a892558d39db15f473e2c308acfd86d9 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0404.c 2b19caf7def14190c99dc4e41983b4a3e3334f22 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0401.c 6d99d644a8294d08b0fdebf183306bbdadf819e3 - src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0402.c 57fec208154cd0d25838a688f6457598baf2de7a - src/nvidia/src/kernel/gpu/disp/arch/v02/kern_disp_0204.c 64aa574198449e9556328d1c08f08b3bde5bfad0 - src/nvidia/src/kernel/gpu/disp/arch/v05/kern_disp_0501.c d911e6ae9f7b96e6f441208d38701a8d833e7455 - src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c ae5ef73d6e74026e0b847977c41b92cbf0f30a62 - src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c 4cfab589176c432463859f148ad32c7dac2c83d3 - src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c 60e8d1fa9cd375be783c4575baa2e99ac2b22a88 - src/nvidia/src/kernel/gpu/timer/timer.c f6e518524581b772f8fdbc80418a2018570940ca - src/nvidia/src/kernel/gpu/timer/timer_ostimer.c 1f4d15f959df38f4f6ea48c7b10fc859c6e04b12 - src/nvidia/src/kernel/gpu/audio/hda_codec_api.c 10a8bfd47ce609763c07a0d61be2f71f9f91889e - src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c bfc82499a8b9b8ce10411f6c391b0e575dc7c0d6 - src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c a62f423d6cf69e96b0523a233ec00353d63ee8bd - src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c 92611eb4f3bed31064a9efbb54a1ece7ffcfc2af - src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c 4a95b73f744807d96510b0ad7181eae5b12839ce - 
src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c ce09583697a98a2d0e8466dd45764f15945f55c2 - src/nvidia/src/kernel/gpu/dce_client/dce_client_rpc.c cebb9eee63e23bb934881b3313e422b50fb38abb - src/nvidia/src/kernel/gpu/dce_client/dce_client.c d5d8ff429d3bda7103bafcb2dca94678efc8ddd8 - src/nvidia/src/kernel/gpu_mgr/gpu_group.c 2b49d8a3413a1731bc4fb0bab3f32ff272a71a8c - src/nvidia/src/kernel/gpu_mgr/gpu_db.c 37d1e3dd86e6409b8e461f90386e013194c9e4d1 - src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c fe618e428d9a172a0fd9412f5a20df64d7270418 - src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c 593bbc5b93b620019144fadf1281a180ec050012 - src/nvidia/src/kernel/mem_mgr/syncpoint_mem.c 54c1d1a44474a7027c5290551e60f13678226301 - src/nvidia/src/kernel/mem_mgr/standard_mem.c 44069d6ebbd94a11267e6cc0179ab167f91faec4 - src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c 5a5e689cf264134ae8c4300d986c209c04167743 - src/nvidia/src/kernel/mem_mgr/vaspace.c 5b9048e62581a3fbb0227d1a46c4ee8d8397bf5b - src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h 630200d06b6588d7fa8c5b1ea16146e8281163d7 - src/nvidia/src/kernel/mem_mgr/io_vaspace.c 04876ed2dedf0ac3228ec6261a0f3f79609e44a5 - src/nvidia/src/kernel/mem_mgr/system_mem.c 873de51b330501a86ec7656fcf3f615034c49f8e - src/nvidia/src/kernel/mem_mgr/os_desc_mem.c ed8376f04af08af8da7d47c6340ff38a8910de87 - src/nvidia/src/kernel/mem_mgr/mem.c 08762b3172f6309f1aeab895761193fa19cb176f - src/nvidia/interface/nv_sriov_defines.h 024b112ea410ee1b1badb585b03fdbabb64ade34 - src/nvidia/interface/nvrm_registry.h 3f7b20e27e6576ee1f2f0557d269697a0b8af7ec - src/nvidia/interface/nv-firmware-registry.h d02ee5bb3f19dffd8b5c30dc852cea243bcdf399 - src/nvidia/interface/acpidsmguids.h 60c7cafce7bd5240e8409e3c5b71214262347efc - src/nvidia/interface/acpigenfuncs.h bff92c9767308a13df1d0858d5f9c82af155679a - src/nvidia/interface/nvacpitypes.h 7790849d0d261e84d04ab5a481bb57309de6409a - src/nvidia/interface/deprecated/rmapi_deprecated_utils.c 82f65de514ef7e2204cfb618d398cf3af8c12778 - src/nvidia/interface/deprecated/rmapi_deprecated.h 49e299b7257e179b701747e061b6b0214d5565f0 - src/nvidia/interface/rmapi/src/g_finn_rm_api.c 7b8431767b7c4b3861582ddab27a079568bf0660 - src/nvidia-modeset/Makefile 7e1249c1d187aec5891eabe5bacae2189d33dc55 - src/nvidia-modeset/lib/nvkms-sync.c c3ab6005d7083e90145cac66addf815c4f93d9a0 - src/nvidia-modeset/lib/nvkms-format.c f69ac0ec080036b8abc7f1ae7b857989f5c9df4a - src/nvidia-modeset/include/nvkms-headsurface-3d.h b8854261256a801af52d1201081afa9c17486a96 - src/nvidia-modeset/include/nvkms-3dvision.h 3212e81bcde5a5dcec5dbba4155a41ca52dd2304 - src/nvidia-modeset/include/nvkms-prealloc.h 24aaf3a4cb16be7a5aaa8317090142743e3dd797 - src/nvidia-modeset/include/nvkms-flip-workarea.h be6cff078fcf66221762a4af1515e01d294dd2f6 - src/nvidia-modeset/include/nvkms-push.h 4361f10ff446c401c3f52bf36aed52ca24706d49 - src/nvidia-modeset/include/nvkms-vrr.h 08aa0dd2f18a8cf74539ea8b25ef3f3646567a0c - src/nvidia-modeset/include/nvkms-evo1.h 9bfb2d12ecdaecaba7eaaffa3040ab142d37f892 - src/nvidia-modeset/include/nvkms-prealloc-types.h 0bd9cf097cfa373f0bed7be8fe5299e2ea4bf669 - src/nvidia-modeset/include/g_nvkms-evo-states.h 708e037052ea0b3d6309fa44a205282b7a69a331 - src/nvidia-modeset/include/nvkms-difr.h 412d8028a548e67e9ef85cb7d3f88385e70c56f9 - src/nvidia-modeset/include/nvkms-console-restore.h 52b6d1a1a6793d232571e6366709436b018ae3b7 - src/nvidia-modeset/include/nvkms-dpy.h 81fcc817dfb8ae1f98b63d2c1acacc303fedb554 - src/nvidia-modeset/include/nvkms-dpy-override.h 
0f251b41b076bb80eeebf7d54e6fd6c764404c28 - src/nvidia-modeset/include/nvkms-evo-states.h 70d9251f331bbf28f5c5bbdf939ebad94db9362d - src/nvidia-modeset/include/nvkms-softfloat.h 6e3681d5caa36312804c91630eaaf510eda897d2 - src/nvidia-modeset/include/nvkms-dma.h eb5248c4b0b51e7aecd2de87e496253b3b235c70 - src/nvidia-modeset/include/nvkms-utils-flip.h 377dd4a29b2ea5937a9b8fc3fba0c9e4ef92992e - src/nvidia-modeset/include/nvkms-cursor.h e1225d674a0e6e58110750868c45a4655110a4d8 - src/nvidia-modeset/include/nvkms-headsurface-swapgroup.h 9e3d50761d3a27c1db3085ff82b7d194ff47bf34 - src/nvidia-modeset/include/nvkms-rm.h fd9fa6da0fc28b00be524b0bed25a68c56278363 - src/nvidia-modeset/include/nvkms-modeset.h be6e0e97c1e7ffc0daa2f14ef7b05b9f9c11dc16 - src/nvidia-modeset/include/nvkms-attributes.h e30d9c286263051d14a1862f0c630295a78abde7 - src/nvidia-modeset/include/nvkms-headsurface-priv.h 3fd0822b8b44d13685ecde9d02300e6cfbb123db - src/nvidia-modeset/include/nvkms-hdmi.h 6b21a68e254becdd2641bc456f194f54c23abe51 - src/nvidia-modeset/include/nvkms-framelock.h 53122264a19ea00ef26e6accde3a3a7570e46b15 - src/nvidia-modeset/include/nvkms-vblank-sem-control.h 1b21352fd9d0b1c5708cb8512acf20ba2e13955d - src/nvidia-modeset/include/nvkms-headsurface.h 59d20eff40e4e488eb3ab7c97b5e171142dcdbcf - src/nvidia-modeset/include/nvkms-modeset-workarea.h 933f9b359a1c3807771e2719c6dd80d71beff3c8 - src/nvidia-modeset/include/nvkms-utils.h f5f3b11c78a8b0eef40c09e1751615a47f516edb - src/nvidia-modeset/include/nvkms-hal.h 03f3fd4c2fb7db83441805a5c350b121bd3117b4 - src/nvidia-modeset/include/nvkms-setlut-workarea.h 31acf6af2a4c82e3429efa77d110cb346c11905f - src/nvidia-modeset/include/nvkms-lut.h e4bae9a0df729119071902f7ad59704c97adee0e - src/nvidia-modeset/include/nvkms-private.h fbe2cbfd32b40d8188c6b25716fb360720ab5760 - src/nvidia-modeset/include/nvkms-evo.h 04f2e01c7f798a615319accc2dd713f617a81172 - src/nvidia-modeset/include/nvkms-headsurface-config.h 4a94381bd8c24b09193577d3f05d6d61f178e1cf - src/nvidia-modeset/include/nvkms-ctxdma.h b4d53599736b03ee1bc149abe7b602336f40295c - src/nvidia-modeset/include/nvkms-flip.h 46fc0e138ba7be5fa3ea0ada3ee0a78656950c80 - src/nvidia-modeset/include/nvkms-modeset-types.h 260b6ef87c755e55a803adad4ce49f2d57315f9a - src/nvidia-modeset/include/nvkms-event.h 35fa1444c57f7adbbddddc612237f3ad38cdd78f - src/nvidia-modeset/include/nvkms-rmapi.h 8782df838ea3d2617e9842c89389f51137b19a73 - src/nvidia-modeset/include/nvkms-headsurface-matrix.h 881d7e4187ff9c7e9d02672aedafc1605f3055ec - src/nvidia-modeset/include/nvkms-modepool.h 60c01e29aa91aa80bf3750a1b11fe61a6cdfde58 - src/nvidia-modeset/include/nvkms-types.h cc3dc4021b76782434efd2aa81d3ffdd1f3b1f0a - src/nvidia-modeset/include/nvkms-headsurface-ioctl.h 3dc2113c55970fa70b7afb4fd30f2f1e777ebc12 - src/nvidia-modeset/include/nvkms-surface.h aa43ad7f970331c56378b7797f66b0a77d8e99dd - src/nvidia-modeset/include/nvkms-evo3.h 8c7e0e15c1038fe518e98d8f86fafb250b10a1d2 - src/nvidia-modeset/include/nvkms-stereo.h 9deeeae9081fd828a14f3b0df5fbf17a81161786 - src/nvidia-modeset/include/nvkms-hw-flip.h 6460f8427fdb375d659975c7f6eaadaca0ed2b2c - src/nvidia-modeset/include/dp/nvdp-device.h 1912d523f567c4fc36075942cf8acaf5d5478232 - src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h a233bdcd5daa0582acf2cd5b0f339ad54d09bf13 - src/nvidia-modeset/include/dp/nvdp-timer.h 2b91423ff88ca398324088d4f910e81f6944123a - src/nvidia-modeset/include/dp/nvdp-connector.h aa8aa13c6fc48ff5ef621f243e94dcc01a46dea3 - src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h 
c0de6efe1d5c57da324118f108ea0570a6923036 - src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h b01351ece15ce0d54a19ad0d7ffa056963d72488 - src/nvidia-modeset/kapi/src/nvkms-kapi.c a4d52bb238ce94f3427f25bd169e58d5d5f4abd1 - src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c ce42ceac4c4cf9d249d66ab57ae2f435cd9623fc - src/nvidia-modeset/kapi/src/nvkms-kapi-sync.c 80c2c9a2a05beb0202239db8b0dd7080ff21c194 - src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h 4c856c1324060dcb5a9e72e5e82c7a60f6324733 - src/nvidia-modeset/kapi/interface/nvkms-kapi.h 11af2aeea97398b58f628fe4685b5dfcfda5791b - src/nvidia-modeset/src/nvkms-modeset.c 016fd1b111731c6d323425d52bfe1a04d8bcade7 - src/nvidia-modeset/src/nvkms-headsurface-swapgroup.c 37a6d00e8721a9c4134810f8be3e7168f8cbb226 - src/nvidia-modeset/src/nvkms-evo.c 4758c601621603597bd2387c4f08b3fdc17e375d - src/nvidia-modeset/src/nvkms-hw-flip.c 5e3188c2d9b580ff69e45842f841f5c92c0c6edb - src/nvidia-modeset/src/nvkms-headsurface-ioctl.c e1a3c31638416a0132c5301fe5dd4b1c93f14376 - src/nvidia-modeset/src/nvkms-cursor3.c d48ff2da5fac6f8cd0522a25b947b5b8c01812ba - src/nvidia-modeset/src/nvkms-rm.c 30ad7839985dea46e6b6d43499210a3056da51ad - src/nvidia-modeset/src/nvkms-utils-flip.c 2c24667a18374ae967917df219f3775d9a79ae04 - src/nvidia-modeset/src/nvkms-headsurface-3d.c fb8b4aa1e36f23e1927be3dbd351ab0357aeb735 - src/nvidia-modeset/src/nvkms-evo3.c 9ce404d122bbdcd5f626f2c2b7ff08a9bfcf4045 - src/nvidia-modeset/src/nvkms-flip.c e5c96eb6b9884daf4a8d0d467b009008a45065b9 - src/nvidia-modeset/src/g_nvkms-evo-states.c 094c2169412cb577a6e9db9420da084264119284 - src/nvidia-modeset/src/nvkms-hal.c 1e0bf57319954911ddd2fe87b0cd05e257f1439e - src/nvidia-modeset/src/nvkms-surface.c bd2e4a6102432d4ac1faf92b5d3db29e9e3cfafc - src/nvidia-modeset/src/nvkms-utils.c 6d41c9f84cc9ce2d16812e94a3fba055b3fc7308 - src/nvidia-modeset/src/nvkms-conf.c 05bfe67d8cb956a666804b8f27e507bbd35e2c2d - src/nvidia-modeset/src/nvkms-difr.c 9a8746ee4a4e772b8ac13f06dc0de8a250fdb4c7 - src/nvidia-modeset/src/nvkms-ctxdma.c 382141f251ce64e2d33add3b89225c373da9ea7d - src/nvidia-modeset/src/nvkms-hdmi.c 2e1644a912e7a27ec04288e000c3fa5439eecb60 - src/nvidia-modeset/src/nvkms-headsurface-matrix.c 127a3f77febf09d56b6fe3534bc62ff0ffa535d8 - src/nvidia-modeset/src/nvkms-dpy.c e0756f45732035b1000a03bd8a995a46041904ae - src/nvidia-modeset/src/nvkms-vblank-sem-control.c e4044bb85de59d662d0d579771c076cbe9b10bbb - src/nvidia-modeset/src/nvkms.c 12cbc57714f458b5673115bb5c4d380509d05277 - src/nvidia-modeset/src/nvkms-cursor.c 5c93bc35d8f93330dd7a1f7808e39c6001ee83e8 - src/nvidia-modeset/src/nvkms-headsurface-config.c ed78249de63139ec2629bde58b616cef649281f1 - src/nvidia-modeset/src/nvkms-evo2.c c51c4f2e3ac11bf86d4549ce5e9d9010199e37dd - src/nvidia-modeset/src/nvkms-prealloc.c 9d38d5147d06a293a272087d78d0b96b6003f11e - src/nvidia-modeset/src/nvkms-attributes.c 65b02b48caff2a9100b8c5614f91d42fb20da9c0 - src/nvidia-modeset/src/nvkms-dpy-override.c a62b617aa5c89056c19a5f3c91402df8cfcc1103 - src/nvidia-modeset/src/nvkms-push.c 9fea40b7b55d6ebf3f73b5d469751c873ffbe7c0 - src/nvidia-modeset/src/nvkms-dma.c da726d20eea99a96af4c10aace88f419e8ee2a34 - src/nvidia-modeset/src/nvkms-event.c a1c7c3c1191762c0a1038674dee0075d532ccd2d - src/nvidia-modeset/src/nvkms-headsurface.c 2fabe1c14116a2b07f24d01710394ee84a6e3914 - src/nvidia-modeset/src/nvkms-3dvision.c 89b58b1e67ff7ed43c889fe7d85329d7f4762b91 - src/nvidia-modeset/src/nvkms-hw-states.c c799d52bdc792efc377fb5cd307b0eb445c44d6a - src/nvidia-modeset/src/nvkms-cursor2.c 
dd6c86b5557b02dd15a8ea0f10bde9770d90874e - src/nvidia-modeset/src/nvkms-evo4.c be49ea18102a44914e0d7686c51430df18336383 - src/nvidia-modeset/src/nvkms-framelock.c 6bdb90474b5d31c53104f7b29b447b3f798aaa0e - src/nvidia-modeset/src/nvkms-vrr.c 05ca4acdfeb9b99eccc7e222846fc688473322ae - src/nvidia-modeset/src/nvkms-rmapi-dgpu.c f754a27436fd1e1fa103de6110224c21ad7ea9f4 - src/nvidia-modeset/src/nvkms-pow.c e8c6d2eedfba19f8f06dd57f629588615cf1a2e9 - src/nvidia-modeset/src/nvkms-evo1.c d15f314bea66574e0ffc72966b86bae8366412f5 - src/nvidia-modeset/src/nvkms-console-restore.c 0699860902369359e5ff1a0ef46b87e955d4bb7a - src/nvidia-modeset/src/nvkms-modepool.c 403e6dbff0a607c2aecf3204c56633bd7b612ae2 - src/nvidia-modeset/src/nvkms-stereo.c fd6ecacc4f273c88960148c070dd17d93f49909b - src/nvidia-modeset/src/nvkms-lut.c 771fee54d1123871e380db6f3227b4946b6be647 - src/nvidia-modeset/src/dp/nvdp-timer.cpp 6b985fc50b5040ce1a81418bed73a60edb5d3289 - src/nvidia-modeset/src/dp/nvdp-timer.hpp dcf9f99e79a13b109a8665597f0fc7c00ec37957 - src/nvidia-modeset/src/dp/nvdp-connector.cpp e0e50fc1c526ecf0fe2f60689a25adda1257e2b3 - src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp 16081091156a813977dfdd0718d55ea4a66a0686 - src/nvidia-modeset/src/dp/nvdp-device.cpp 6e17f81da1b94414c1cbf18c3ea92f25352d8bf5 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp 81065db63fda6468fdf56d853781fca8af610798 - src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp e1f003a64cec57f299e65567d29e69951a62f44a - src/nvidia-modeset/src/dp/nvdp-host.cpp ca07b8e8f507de47694ac7b3b1719b0931da02c6 - src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp 2b49249a135293d01e82ef11ee520596c9825875 - src/nvidia-modeset/src/shaders/g_pascal_shaders 09cb78322cc8465d42a4be6a1c3682566c66462d - src/nvidia-modeset/src/shaders/g_maxwell_shaders a62c80e00077041d38d84e06c5834dca527e8a55 - src/nvidia-modeset/src/shaders/g_volta_shader_info.h 21cf709a8717d43c4abc6b66c8faad141592b7ce - src/nvidia-modeset/src/shaders/g_nvidia-headsurface-shader-info.h fec9074463a5505e300f9feb77b60ec77b781bb7 - src/nvidia-modeset/src/shaders/g_turing_shader_info.h cad54ab33c1132ba7453f54e9a02d34504e4fd5c - src/nvidia-modeset/src/shaders/g_pascal_shader_info.h f3bdeb7d46fdc9c31940ea799ce4a0d328fe1844 - src/nvidia-modeset/src/shaders/g_ampere_shaders 0ba4739302e0938b5599afb7e7ad281b21e25cec - src/nvidia-modeset/src/shaders/g_maxwell_shader_info.h 1c02043d31faf4f79c4a54dd5a622e87ee276be8 - src/nvidia-modeset/src/shaders/g_volta_shaders f540d144503d00941a1b32fb1a3d13061065b24e - src/nvidia-modeset/src/shaders/g_hopper_shader_info.h 74824b796722071bc3d90e4dacfed245dcda28cd - src/nvidia-modeset/src/shaders/g_turing_shaders ce728856b76bfa428b199fd3b97e0cbc24ef54cd - src/nvidia-modeset/src/shaders/g_hopper_shaders 02bb8bc0f5d228d4a9a383d797daffd8936c4ad7 - src/nvidia-modeset/src/shaders/g_ampere_shader_info.h 9f35175e44247d4facb26a60614d40fcdb74416f - src/nvidia-modeset/src/shaders/g_shader_names.h ca86fee8bd52e6c84e376199c5f3890078bc2031 - src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h b2a5ddfd8dcb3000b9d102bd55b5b560730e81d5 - src/nvidia-modeset/os-interface/include/nvkms.h 51b367a6e289cc8957388745988315024f97506e - src/nvidia-modeset/interface/nvkms-api.h b986bc6591ba17a74ad81ec4c93347564c6d5165 - src/nvidia-modeset/interface/nvkms-format.h 2ea1436104463c5e3d177e8574c3b4298976d37e - src/nvidia-modeset/interface/nvkms-ioctl.h 3bf4a2d1fec120ef5313c8bf119bc22fb3cf0cc5 - src/nvidia-modeset/interface/nvkms-modetimings.h c54c62de441828282db9a4f5b35c2fa5c97d94f1 - 
src/nvidia-modeset/interface/nvkms-api-types.h 8e3e74d2b3f45381e7b0012d930cf451cbd1728f - src/nvidia-modeset/interface/nvkms-sync.h Change-Id: If5ef3d3202eab829a730f4711eb572cfbfea8273
329
kernel-open/nvidia-drm/nv-kthread-q.c
Normal file
@@ -0,0 +1,329 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2016-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nv-kthread-q.h"
#include "nv-list-helpers.h"

#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bug.h>

// Today's implementation is a little simpler and more limited than the
// API description allows for in nv-kthread-q.h. Details include:
//
// 1. Each nv_kthread_q instance is a first-in, first-out queue.
//
// 2. Each nv_kthread_q instance is serviced by exactly one kthread.
//
// You can create any number of queues, each of which gets its own
// named kernel thread (kthread). You can then insert arbitrary functions
// into the queue, and those functions will be run in the context of the
// queue's kthread.

#ifndef WARN
// Only *really* old kernels (2.6.9) end up here. Just use a simple printk
// to implement this, because such kernels won't be supported much longer.
#define WARN(condition, format...) ({                     \
    int __ret_warn_on = !!(condition);                    \
    if (unlikely(__ret_warn_on))                          \
        printk(KERN_ERR format);                          \
    unlikely(__ret_warn_on);                              \
})
#endif

#define NVQ_WARN(fmt, ...)                                \
    do {                                                  \
        if (in_interrupt()) {                             \
            WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \
                 ##__VA_ARGS__);                          \
        }                                                 \
        else {                                            \
            WARN(1, "nv_kthread_q: task: %s: " fmt,       \
                 current->comm,                           \
                 ##__VA_ARGS__);                          \
        }                                                 \
    } while (0)

static int _main_loop(void *args)
{
    nv_kthread_q_t *q = (nv_kthread_q_t *)args;
    nv_kthread_q_item_t *q_item = NULL;
    unsigned long flags;

    while (1) {
        // Normally this thread is never interrupted. However,
        // down_interruptible (instead of down) is called here,
        // in order to avoid being classified as a potentially
        // hung task, by the kernel watchdog.
        while (down_interruptible(&q->q_sem))
            NVQ_WARN("Interrupted during semaphore wait\n");

        if (atomic_read(&q->main_loop_should_exit))
            break;

        spin_lock_irqsave(&q->q_lock, flags);

        // The q_sem semaphore prevents us from getting here unless there is
        // at least one item in the list, so an empty list indicates a bug.
        if (unlikely(list_empty(&q->q_list_head))) {
            spin_unlock_irqrestore(&q->q_lock, flags);
            NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q);
            continue;
        }

        // Consume one item from the queue
        q_item = list_first_entry(&q->q_list_head,
                                  nv_kthread_q_item_t,
                                  q_list_node);

        list_del_init(&q_item->q_list_node);

        spin_unlock_irqrestore(&q->q_lock, flags);

        // Run the item
        q_item->function_to_run(q_item->function_args);

        // Make debugging a little simpler by clearing this between runs:
        q_item = NULL;
    }

    while (!kthread_should_stop())
        schedule();

    return 0;
}

void nv_kthread_q_stop(nv_kthread_q_t *q)
{
    // check if queue has been properly initialized
    if (unlikely(!q->q_kthread))
        return;

    nv_kthread_q_flush(q);

    // If this assertion fires, then a caller likely either broke the API rules,
    // by adding items after calling nv_kthread_q_stop, or possibly messed up
    // with inadequate flushing of self-rescheduling q_items.
    if (unlikely(!list_empty(&q->q_list_head)))
        NVQ_WARN("list not empty after flushing\n");

    if (likely(!atomic_read(&q->main_loop_should_exit))) {

        atomic_set(&q->main_loop_should_exit, 1);

        // Wake up the kthread so that it can see that it needs to stop:
        up(&q->q_sem);

        kthread_stop(q->q_kthread);
        q->q_kthread = NULL;
    }
}

// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by
// kthread_create_on_node relies on a 2 entry, per-core cache to minimize
// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the
// stack location ends up being a function of the core assigned to the current
// thread, instead of being a function of the specified NUMA node. The cache was
// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0
// ("fork: Optimize task creation by caching two thread stacks per CPU if
// CONFIG_VMAP_STACK=y")
//
// To work around the problematic cache, we create up to three kernel threads
// -If the first thread's stack is resident on the preferred node, return this
//  thread.
// -Otherwise, create a second thread. If its stack is resident on the
//  preferred node, stop the first thread and return this one.
// -Otherwise, create a third thread. The stack allocator does not find a
//  cached stack, and so falls back to vmalloc, which takes the NUMA hint into
//  consideration. The first two threads are then stopped.
//
// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned.
//
// This function is never invoked when there is no NUMA preference (preferred
// node is NUMA_NO_NODE).
static struct task_struct *thread_create_on_node(int (*threadfn)(void *data),
                                                 nv_kthread_q_t *q,
                                                 int preferred_node,
                                                 const char *q_name)
{

    unsigned i, j;
    static const unsigned attempts = 3;
    struct task_struct *thread[3];

    for (i = 0;; i++) {
        struct page *stack;

        thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name);

        if (unlikely(IS_ERR(thread[i]))) {

            // Instead of failing, pick the previous thread, even if its
            // stack is not allocated on the preferred node.
            if (i > 0)
                i--;

            break;
        }

        // vmalloc is not used to allocate the stack, so simply return the
        // thread, even if its stack may not be allocated on the preferred node
        if (!is_vmalloc_addr(thread[i]->stack))
            break;

        // Ran out of attempts - return thread even if its stack may not be
        // allocated on the preferred node
        if (i == (attempts - 1))
            break;

        // Get the NUMA node where the first page of the stack is resident. If
        // it is the preferred node, select this thread.
        stack = vmalloc_to_page(thread[i]->stack);
        if (page_to_nid(stack) == preferred_node)
            break;
    }

    for (j = i; j > 0; j--)
        kthread_stop(thread[j - 1]);

    return thread[i];
}

int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node)
{
    memset(q, 0, sizeof(*q));

    INIT_LIST_HEAD(&q->q_list_head);
    spin_lock_init(&q->q_lock);
    sema_init(&q->q_sem, 0);

    if (preferred_node == NV_KTHREAD_NO_NODE) {
        q->q_kthread = kthread_create(_main_loop, q, q_name);
    }
    else {
        q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name);
    }

    if (IS_ERR(q->q_kthread)) {
        int err = PTR_ERR(q->q_kthread);

        // Clear q_kthread before returning so that nv_kthread_q_stop() can be
        // safely called on it making error handling easier.
        q->q_kthread = NULL;

        return err;
    }

    wake_up_process(q->q_kthread);

    return 0;
}

int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
    return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}

// Returns true (non-zero) if the item was actually scheduled, and false if the
// item was already pending in a queue.
static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item)
{
    unsigned long flags;
    int ret = 1;

    spin_lock_irqsave(&q->q_lock, flags);

    if (likely(list_empty(&q_item->q_list_node)))
        list_add_tail(&q_item->q_list_node, &q->q_list_head);
    else
        ret = 0;

    spin_unlock_irqrestore(&q->q_lock, flags);

    if (likely(ret))
        up(&q->q_sem);

    return ret;
}

void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
                            nv_q_func_t function_to_run,
                            void *function_args)
{
    INIT_LIST_HEAD(&q_item->q_list_node);
    q_item->function_to_run = function_to_run;
    q_item->function_args = function_args;
}

// Returns true (non-zero) if the q_item got scheduled, false otherwise.
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
                                 nv_kthread_q_item_t *q_item)
{
    if (unlikely(atomic_read(&q->main_loop_should_exit))) {
        NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was "
                 "called with a non-alive q: 0x%p\n", q);
        return 0;
    }

    return _raw_q_schedule(q, q_item);
}

static void _q_flush_function(void *args)
{
    struct completion *completion = (struct completion *)args;
    complete(completion);
}


static void _raw_q_flush(nv_kthread_q_t *q)
{
    nv_kthread_q_item_t q_item;
    DECLARE_COMPLETION_ONSTACK(completion);

    nv_kthread_q_item_init(&q_item, _q_flush_function, &completion);

    _raw_q_schedule(q, &q_item);

    // Wait for the flush item to run. Once it has run, then all of the
    // previously queued items in front of it will have run, so that means
    // the flush is complete.
    wait_for_completion(&completion);
}

void nv_kthread_q_flush(nv_kthread_q_t *q)
{
    if (unlikely(atomic_read(&q->main_loop_should_exit))) {
        NVQ_WARN("Not allowed: nv_kthread_q_flush was called after "
                 "nv_kthread_q_stop. q: 0x%p\n", q);
        return;
    }

    // This 2x flush is not a typing mistake. The queue really does have to be
    // flushed twice, in order to take care of the case of a q_item that
    // reschedules itself.
    _raw_q_flush(q);
    _raw_q_flush(q);
}
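The file above only shows the queue implementation; for orientation, here is a minimal usage sketch of that API. It assumes only the nv_kthread_q_t and nv_kthread_q_item_t definitions from nv-kthread-q.h; the callback, queue name, and wrapper function are illustrative and not part of this commit.

#include "nv-kthread-q.h"

static void example_work(void *args)
{
    /* Runs in the context of the queue's dedicated kthread. */
}

static int example_use_queue(void)
{
    static nv_kthread_q_t q;
    static nv_kthread_q_item_t item;
    int ret;

    /* Create the queue and its named service kthread. */
    ret = nv_kthread_q_init(&q, "example_q");
    if (ret != 0)
        return ret;

    nv_kthread_q_item_init(&item, example_work, NULL);

    /* Non-zero return means the item was newly queued (not already pending). */
    nv_kthread_q_schedule_q_item(&q, &item);

    /* Wait until everything queued so far (including this item) has run. */
    nv_kthread_q_flush(&q);

    /* Flushes again internally, then stops and reaps the kthread. */
    nv_kthread_q_stop(&q);

    return 0;
}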
90
kernel-open/nvidia-drm/nv-pci-table.c
Normal file
@@ -0,0 +1,90 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include "nv-pci-table.h"
#include "cpuopsys.h"

#if defined(NV_BSD)
/* Define PCI classes that FreeBSD's linuxkpi is missing */
#define PCI_VENDOR_ID_NVIDIA 0x10de
#define PCI_CLASS_DISPLAY_VGA 0x0300
#define PCI_CLASS_DISPLAY_3D 0x0302
#define PCI_CLASS_BRIDGE_OTHER 0x0680
#endif

/* Devices supported by RM */
struct pci_device_id nv_pci_table[] = {
    {
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_DISPLAY_VGA << 8),
        .class_mask = ~0
    },
    {
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_DISPLAY_3D << 8),
        .class_mask = ~0
    },
    { }
};

/* Devices supported by all drivers in nvidia.ko */
struct pci_device_id nv_module_device_table[4] = {
    {
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_DISPLAY_VGA << 8),
        .class_mask = ~0
    },
    {
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_DISPLAY_3D << 8),
        .class_mask = ~0
    },
    {
        .vendor = PCI_VENDOR_ID_NVIDIA,
        .device = PCI_ANY_ID,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
        .class = (PCI_CLASS_BRIDGE_OTHER << 8),
        .class_mask = ~0
    },
    { }
};

#if defined(NV_LINUX)
MODULE_DEVICE_TABLE(pci, nv_module_device_table);
#endif
32
kernel-open/nvidia-drm/nv-pci-table.h
Normal file
@@ -0,0 +1,32 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _NV_PCI_TABLE_H_
#define _NV_PCI_TABLE_H_

#include <linux/pci.h>

extern struct pci_device_id nv_pci_table[];
extern struct pci_device_id nv_module_device_table[4];

#endif /* _NV_PCI_TABLE_H_ */
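For orientation only, a sketch of how a driver object might reference the exported nv_pci_table when registering with the Linux PCI core; the driver name and callbacks here are hypothetical and not part of this commit.

#include <linux/pci.h>

#include "nv-pci-table.h"

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    /* A real driver would enable the device, map BARs, etc. */
    return 0;
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct pci_driver example_pci_driver = {
    .name     = "example-nv",
    .id_table = nv_pci_table,   /* VGA and 3D display-class NVIDIA devices */
    .probe    = example_probe,
    .remove   = example_remove,
};

/* module_pci_driver(example_pci_driver); */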
120
kernel-open/nvidia-drm/nv_common_utils.h
Normal file
@@ -0,0 +1,120 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NV_COMMON_UTILS_H__
#define __NV_COMMON_UTILS_H__

#include "nvtypes.h"
#include "nvmisc.h"

#if !defined(TRUE)
#define TRUE NV_TRUE
#endif

#if !defined(FALSE)
#define FALSE NV_FALSE
#endif

#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0)

/* Get the length of a statically-sized array. */
#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0]))

#define NV_INVALID_HEAD 0xFFFFFFFF

#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0)

#if !defined(NV_MIN)
# define NV_MIN(a,b) (((a)<(b))?(a):(b))
#endif

#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c)
#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d)

#if !defined(NV_MAX)
# define NV_MAX(a,b) (((a)>(b))?(a):(b))
#endif

#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c)
#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d)

static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max)
{
    if (val < min) {
        return min;
    }
    if (val > max) {
        return max;
    }
    return val;
}

#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0))

/*
 * Macros used for computing palette entries:
 *
 * NV_UNDER_REPLICATE(val, source_size, result_size) expands a value
 * of source_size bits into a value of target_size bits by shifting
 * the source value into the high bits and replicating the high bits
 * of the value into the low bits of the result.
 *
 * PALETTE_DEPTH_SHIFT(val, w) maps a colormap entry for a component
 * that has w bits to an appropriate entry in a LUT of 256 entries.
 */
static inline unsigned int NV_UNDER_REPLICATE(unsigned short val,
                                              int source_size,
                                              int result_size)
{
    return (val << (result_size - source_size)) |
           (val >> ((source_size << 1) - result_size));
}


static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth)
{
    return NV_UNDER_REPLICATE(val, depth, 8);
}

/*
 * Use __builtin_ffs where it is supported, or provide an equivalent
 * implementation for platforms like riscv where it is not.
 */
#if defined(__GNUC__) && !NVCPU_IS_RISCV64
static inline int nv_ffs(int x)
{
    return __builtin_ffs(x);
}
#else
static inline int nv_ffs(int x)
{
    if (x == 0)
        return 0;

    LOWESTBITIDX_32(x);

    return 1 + x;
}
#endif

#endif /* __NV_COMMON_UTILS_H__ */
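A brief worked example of the bit-replication helpers declared above, with illustrative values; the wrapper function below is not part of the commit.

/* Illustrative only: expand the 5-bit red component of an RGB565 pixel to
 * 8 bits using NV_UNDER_REPLICATE() from nv_common_utils.h. The high bits
 * are replicated into the low bits, so full scale stays full scale:
 *   NV_UNDER_REPLICATE(0x1F, 5, 8) == (0x1F << 3) | (0x1F >> 2) == 0xFF
 *   NV_UNDER_REPLICATE(0x10, 5, 8) == (0x10 << 3) | (0x10 >> 2) == 0x84
 * PALETTE_DEPTH_SHIFT(val, w) is the same expansion with result_size fixed
 * at 8, e.g. PALETTE_DEPTH_SHIFT(0x3F, 6) == 0xFF for a 6-bit component.
 */
static inline unsigned int example_expand_red5_to_8(unsigned short red5)
{
    return NV_UNDER_REPLICATE(red5, 5, 8);
}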
136
kernel-open/nvidia-drm/nvidia-dma-resv-helper.h
Normal file
@@ -0,0 +1,136 @@
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVIDIA_DMA_RESV_HELPER_H__
#define __NVIDIA_DMA_RESV_HELPER_H__

#include "nvidia-drm-conftest.h"

/*
 * linux/reservation.h is renamed to linux/dma-resv.h, by commit
 * 52791eeec1d9 (dma-buf: rename reservation_object to dma_resv)
 * in v5.4.
 */

#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#include <linux/dma-resv.h>
#else
#include <linux/reservation.h>
#endif

#include <linux/dma-fence.h>

#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
typedef struct dma_resv nv_dma_resv_t;
#else
typedef struct reservation_object nv_dma_resv_t;
#endif

static inline void nv_dma_resv_init(nv_dma_resv_t *obj)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
    dma_resv_init(obj);
#else
    reservation_object_init(obj);
#endif
}

static inline void nv_dma_resv_fini(nv_dma_resv_t *obj)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
    dma_resv_fini(obj);
#else
    reservation_object_fini(obj);
#endif
}

static inline void nv_dma_resv_lock(nv_dma_resv_t *obj,
                                    struct ww_acquire_ctx *ctx)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
    dma_resv_lock(obj, ctx);
#else
    ww_mutex_lock(&obj->lock, ctx);
#endif
}

static inline void nv_dma_resv_unlock(nv_dma_resv_t *obj)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
    dma_resv_unlock(obj);
#else
    ww_mutex_unlock(&obj->lock);
#endif
}

static inline int nv_dma_resv_reserve_fences(nv_dma_resv_t *obj,
                                             unsigned int num_fences,
                                             NvBool shared)
{
#if defined(NV_DMA_RESV_RESERVE_FENCES_PRESENT)
    return dma_resv_reserve_fences(obj, num_fences);
#else
    if (shared) {
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
        return dma_resv_reserve_shared(obj, num_fences);
#elif defined(NV_RESERVATION_OBJECT_RESERVE_SHARED_HAS_NUM_FENCES_ARG)
        return reservation_object_reserve_shared(obj, num_fences);
#else
        unsigned int i;
        for (i = 0; i < num_fences; i++) {
            reservation_object_reserve_shared(obj);
        }
#endif
    }
    return 0;
#endif
}

static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj,
                                              struct dma_fence *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
    dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
#else
    dma_resv_add_excl_fence(obj, fence);
#endif
#else
    reservation_object_add_excl_fence(obj, fence);
#endif
}

static inline void nv_dma_resv_add_shared_fence(nv_dma_resv_t *obj,
                                                struct dma_fence *fence)
{
#if defined(NV_LINUX_DMA_RESV_H_PRESENT)
#if defined(NV_DMA_RESV_ADD_FENCE_PRESENT)
    dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_READ);
#else
    dma_resv_add_shared_fence(obj, fence);
#endif
#else
    reservation_object_add_shared_fence(obj, fence);
#endif
}

#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */
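A minimal sketch of how these wrappers are typically used around fence bookkeeping, assuming the reservation object and fence come from an existing GEM object elsewhere in the driver; the helper below is illustrative and not part of this commit.

/* Assumes the nv_dma_resv_* helpers from nvidia-dma-resv-helper.h above. */
static void example_attach_write_fence(nv_dma_resv_t *resv,
                                       struct dma_fence *fence)
{
    nv_dma_resv_lock(resv, NULL);

    /* Make room for one fence slot, then publish the fence as the
     * exclusive (write) fence on the reservation object. */
    if (nv_dma_resv_reserve_fences(resv, 1, NV_FALSE) == 0) {
        nv_dma_resv_add_excl_fence(resv, fence);
    }

    nv_dma_resv_unlock(resv);
}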
204
kernel-open/nvidia-drm/nvidia-drm-conftest.h
Normal file
@@ -0,0 +1,204 @@
|
||||
/*
|
||||
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_CONFTEST_H__
|
||||
#define __NVIDIA_DRM_CONFTEST_H__
|
||||
|
||||
#include "conftest.h"
|
||||
#include "nvtypes.h"
|
||||
|
||||
/*
|
||||
* NOTE: This file is expected to get included at the top before including any
|
||||
* of linux/drm headers.
|
||||
*
|
||||
* The goal is to redefine refcount_dec_and_test and refcount_inc before
|
||||
* including drm header files, so that the drm macro/inline calls to
|
||||
* refcount_dec_and_test* and refcount_inc get redirected to
|
||||
* alternate implementation in this file.
|
||||
*/
|
||||
|
||||
#if NV_IS_EXPORT_SYMBOL_GPL_refcount_inc
|
||||
|
||||
#include <linux/refcount.h>
|
||||
|
||||
#define refcount_inc(__ptr) \
|
||||
do { \
|
||||
atomic_inc(&(__ptr)->refs); \
|
||||
} while(0)
|
||||
|
||||
#endif
|
||||
|
||||
#if NV_IS_EXPORT_SYMBOL_GPL_refcount_dec_and_test
|
||||
|
||||
#include <linux/refcount.h>
|
||||
|
||||
#define refcount_dec_and_test(__ptr) atomic_dec_and_test(&(__ptr)->refs)
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(NV_DRM_FBDEV_GENERIC_SETUP_PRESENT) && \
|
||||
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
|
||||
#define NV_DRM_FBDEV_AVAILABLE
|
||||
#define NV_DRM_FBDEV_GENERIC_AVAILABLE
|
||||
#endif
|
||||
|
||||
#if defined(NV_DRM_FBDEV_TTM_SETUP_PRESENT) && \
|
||||
defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT)
|
||||
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
|
||||
#define NV_DRM_FBDEV_AVAILABLE
|
||||
#define NV_DRM_FBDEV_TTM_AVAILABLE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(NV_DRM_CLIENT_SETUP_PRESENT) && \
|
||||
(defined(NV_DRM_APERTURE_REMOVE_CONFLICTING_PCI_FRAMEBUFFERS_PRESENT) || \
|
||||
defined(NV_APERTURE_REMOVE_CONFLICTING_PCI_DEVICES_PRESENT))
|
||||
// XXX remove dependency on DRM_TTM_HELPER by implementing nvidia-drm's own
|
||||
// .fbdev_probe callback that uses NVKMS kapi
|
||||
#if IS_ENABLED(CONFIG_DRM_TTM_HELPER)
|
||||
#define NV_DRM_FBDEV_AVAILABLE
|
||||
#define NV_DRM_CLIENT_AVAILABLE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Adapt to quirks in FreeBSD's Linux kernel compatibility layer.
|
||||
*/
|
||||
#if defined(NV_BSD)
|
||||
|
||||
#include <linux/rwsem.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/lock.h>
|
||||
#include <sys/sx.h>
|
||||
|
||||
/* For nv_drm_gem_prime_force_fence_signal */
|
||||
#ifndef spin_is_locked
|
||||
#if ((__FreeBSD_version >= 1500000) && (__FreeBSD_version < 1500018)) || (__FreeBSD_version < 1401501)
|
||||
#define spin_is_locked(lock) mtx_owned(lock.m)
|
||||
#else
|
||||
#define spin_is_locked(lock) mtx_owned(lock)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef rwsem_is_locked
|
||||
#define rwsem_is_locked(sem) (((sem)->sx.sx_lock & (SX_LOCK_SHARED)) \
|
||||
|| ((sem)->sx.sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)))
|
||||
#endif
|
||||
|
||||
/*
|
||||
* FreeBSD does not define vm_flags_t in its linuxkpi, since there is already
|
||||
* a FreeBSD vm_flags_t (of a different size) and they don't want the names to
|
||||
* collide. Temporarily redefine it when including nv-mm.h
|
||||
*/
|
||||
#define vm_flags_t unsigned long
|
||||
#include "nv-mm.h"
|
||||
#undef vm_flags_t
|
||||
|
||||
/*
|
||||
* sys/nv.h and nvidia/nv.h have the same header guard
|
||||
* we need to clear it for nvlist_t to get loaded
|
||||
*/
|
||||
#undef _NV_H_
|
||||
#include <sys/nv.h>
|
||||
|
||||
/*
|
||||
* For now just use set_page_dirty as the lock variant
|
||||
* is not ported for FreeBSD. (in progress). This calls
|
||||
* vm_page_dirty. Used in nv-mm.h
|
||||
*/
|
||||
#define set_page_dirty_lock set_page_dirty
|
||||
|
||||
/*
|
||||
* FreeBSD does not implement drm_atomic_state_free, simply
|
||||
* default to drm_atomic_state_put
|
||||
*/
|
||||
#define drm_atomic_state_free drm_atomic_state_put
|
||||
|
||||
#if __FreeBSD_version < 1300000
|
||||
/* redefine LIST_HEAD_INIT to the linux version */
|
||||
#include <linux/list.h>
|
||||
#define LIST_HEAD_INIT(name) LINUX_LIST_HEAD_INIT(name)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* FreeBSD currently has only vmf_insert_pfn_prot defined, and it has a
|
||||
* static assert warning not to use it since all of DRM's usages are in
|
||||
* loops with the vm obj lock(s) held. Instead we should use the lkpi
|
||||
* function itself directly. For us none of this applies so we can just
|
||||
* wrap it in our own definition of vmf_insert_pfn
|
||||
*/
|
||||
#ifndef NV_VMF_INSERT_PFN_PRESENT
|
||||
#define NV_VMF_INSERT_PFN_PRESENT 1
|
||||
|
||||
#if __FreeBSD_version < 1300000
|
||||
#define VM_SHARED (1 << 17)
|
||||
|
||||
/* Not present in 12.2 */
|
||||
static inline vm_fault_t
|
||||
lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
|
||||
unsigned long pfn, pgprot_t prot)
|
||||
{
|
||||
vm_object_t vm_obj = vma->vm_obj;
|
||||
vm_page_t page;
|
||||
vm_pindex_t pindex;
|
||||
|
||||
VM_OBJECT_ASSERT_WLOCKED(vm_obj);
|
||||
pindex = OFF_TO_IDX(addr - vma->vm_start);
|
||||
if (vma->vm_pfn_count == 0)
|
||||
vma->vm_pfn_first = pindex;
|
||||
MPASS(pindex <= OFF_TO_IDX(vma->vm_end));
|
||||
|
||||
page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NORMAL);
|
||||
if (page == NULL) {
|
||||
page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
|
||||
vm_page_xbusy(page);
|
||||
if (vm_page_insert(page, vm_obj, pindex)) {
|
||||
vm_page_xunbusy(page);
|
||||
return (VM_FAULT_OOM);
|
||||
}
|
||||
page->valid = VM_PAGE_BITS_ALL;
|
||||
}
|
||||
pmap_page_set_memattr(page, pgprot2cachemode(prot));
|
||||
vma->vm_pfn_count++;
|
||||
|
||||
return (VM_FAULT_NOPAGE);
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline vm_fault_t
|
||||
vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
|
||||
unsigned long pfn)
|
||||
{
|
||||
vm_fault_t ret;
|
||||
|
||||
VM_OBJECT_WLOCK(vma->vm_obj);
|
||||
ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, vma->vm_page_prot);
|
||||
VM_OBJECT_WUNLOCK(vma->vm_obj);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* defined(NV_BSD) */
|
||||
|
||||
#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */
|
||||
640
kernel-open/nvidia-drm/nvidia-drm-connector.c
Normal file
@@ -0,0 +1,640 @@
|
||||
/*
|
||||
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-helper.h"
|
||||
#include "nvidia-drm-priv.h"
|
||||
#include "nvidia-drm-connector.h"
|
||||
#include "nvidia-drm-crtc.h"
|
||||
#include "nvidia-drm-utils.h"
|
||||
#include "nvidia-drm-encoder.h"
|
||||
|
||||
/*
|
||||
* Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
|
||||
* moves a number of helper function definitions from
|
||||
* drm/drm_crtc_helper.h to a new drm_probe_helper.h.
|
||||
*/
|
||||
#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#endif
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_edid.h>
|
||||
|
||||
static void nv_drm_connector_destroy(struct drm_connector *connector)
|
||||
{
|
||||
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
|
||||
|
||||
drm_connector_unregister(connector);
|
||||
|
||||
drm_connector_cleanup(connector);
|
||||
|
||||
if (nv_connector->edid != NULL) {
|
||||
nv_drm_free(nv_connector->edid);
|
||||
}
|
||||
|
||||
nv_drm_free(nv_connector);
|
||||
}
|
||||
|
||||
static bool
|
||||
__nv_drm_detect_encoder(struct NvKmsKapiDynamicDisplayParams *pDetectParams,
|
||||
struct drm_connector *connector,
|
||||
struct drm_encoder *encoder)
|
||||
{
|
||||
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct nv_drm_encoder *nv_encoder;
|
||||
|
||||
/*
|
||||
* DVI-I connectors can drive both digital and analog
|
||||
* encoders. If a digital connection has been forced then
|
||||
* skip analog encoders.
|
||||
*/
|
||||
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_DVII &&
|
||||
connector->force == DRM_FORCE_ON_DIGITAL &&
|
||||
encoder->encoder_type == DRM_MODE_ENCODER_DAC) {
|
||||
return false;
|
||||
}
|
||||
|
||||
nv_encoder = to_nv_encoder(encoder);
|
||||
|
||||
memset(pDetectParams, 0, sizeof(*pDetectParams));
|
||||
|
||||
pDetectParams->handle = nv_encoder->hDisplay;
|
||||
|
||||
switch (connector->force) {
|
||||
case DRM_FORCE_ON:
|
||||
case DRM_FORCE_ON_DIGITAL:
|
||||
pDetectParams->forceConnected = NV_TRUE;
|
||||
break;
|
||||
case DRM_FORCE_OFF:
|
||||
pDetectParams->forceDisconnected = NV_TRUE;
|
||||
break;
|
||||
case DRM_FORCE_UNSPECIFIED:
|
||||
break;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_CONNECTOR_HAS_OVERRIDE_EDID)
|
||||
if (connector->override_edid) {
|
||||
#else
|
||||
if (drm_edid_override_connector_update(connector) > 0) {
|
||||
#endif
|
||||
const struct drm_property_blob *edid = connector->edid_blob_ptr;
|
||||
|
||||
if (edid->length <= sizeof(pDetectParams->edid.buffer)) {
|
||||
memcpy(pDetectParams->edid.buffer, edid->data, edid->length);
|
||||
pDetectParams->edid.bufferSize = edid->length;
|
||||
pDetectParams->overrideEdid = NV_TRUE;
|
||||
} else {
|
||||
WARN_ON(edid->length >
|
||||
sizeof(pDetectParams->edid.buffer));
|
||||
}
|
||||
}
|
||||
|
||||
if (!nvKms->getDynamicDisplayInfo(nv_dev->pDevice, pDetectParams)) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to detect display state");
|
||||
return false;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_CONNECTOR_HAS_VRR_CAPABLE_PROPERTY)
|
||||
drm_connector_attach_vrr_capable_property(&nv_connector->base);
|
||||
drm_connector_set_vrr_capable_property(&nv_connector->base, pDetectParams->vrrSupported ? true : false);
|
||||
#endif
|
||||
|
||||
if (pDetectParams->connected) {
|
||||
if (!pDetectParams->overrideEdid && pDetectParams->edid.bufferSize) {
|
||||
|
||||
if ((nv_connector->edid = nv_drm_calloc(
|
||||
1,
|
||||
pDetectParams->edid.bufferSize)) != NULL) {
|
||||
|
||||
memcpy(nv_connector->edid,
|
||||
pDetectParams->edid.buffer,
|
||||
pDetectParams->edid.bufferSize);
|
||||
} else {
|
||||
NV_DRM_LOG_ERR("Out of Memory");
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static enum drm_connector_status __nv_drm_connector_detect_internal(
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
|
||||
|
||||
enum drm_connector_status status = connector_status_disconnected;
|
||||
|
||||
struct drm_encoder *detected_encoder = NULL;
|
||||
struct nv_drm_encoder *nv_detected_encoder = NULL;
|
||||
struct drm_encoder *encoder;
|
||||
|
||||
struct NvKmsKapiDynamicDisplayParams *pDetectParams = NULL;
|
||||
|
||||
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
|
||||
|
||||
if (nv_connector->edid != NULL) {
|
||||
nv_drm_free(nv_connector->edid);
|
||||
nv_connector->edid = NULL;
|
||||
}
|
||||
|
||||
if ((pDetectParams = nv_drm_calloc(
|
||||
1,
|
||||
sizeof(*pDetectParams))) == NULL) {
|
||||
WARN_ON(pDetectParams == NULL);
|
||||
goto done;
|
||||
}
|
||||
|
||||
nv_drm_connector_for_each_possible_encoder(connector, encoder) {
|
||||
if (__nv_drm_detect_encoder(pDetectParams, connector, encoder)) {
|
||||
detected_encoder = encoder;
|
||||
break;
|
||||
}
|
||||
} nv_drm_connector_for_each_possible_encoder_end;
|
||||
|
||||
if (detected_encoder == NULL) {
|
||||
goto done;
|
||||
}
|
||||
|
||||
nv_detected_encoder = to_nv_encoder(detected_encoder);
|
||||
|
||||
status = connector_status_connected;
|
||||
|
||||
nv_connector->nv_detected_encoder = nv_detected_encoder;
|
||||
|
||||
if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DVI_I) {
|
||||
drm_object_property_set_value(
|
||||
&connector->base,
|
||||
dev->mode_config.dvi_i_subconnector_property,
|
||||
detected_encoder->encoder_type == DRM_MODE_ENCODER_DAC ?
|
||||
DRM_MODE_SUBCONNECTOR_DVIA :
|
||||
DRM_MODE_SUBCONNECTOR_DVID);
|
||||
}
|
||||
|
||||
done:
|
||||
|
||||
nv_drm_free(pDetectParams);
|
||||
|
||||
if (status == connector_status_disconnected &&
|
||||
nv_connector->modeset_permission_filep) {
|
||||
nv_drm_connector_revoke_permissions(dev, nv_connector);
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static void __nv_drm_connector_force(struct drm_connector *connector)
|
||||
{
|
||||
__nv_drm_connector_detect_internal(connector);
|
||||
}
|
||||
|
||||
static enum drm_connector_status
|
||||
nv_drm_connector_detect(struct drm_connector *connector, bool force)
|
||||
{
|
||||
return __nv_drm_connector_detect_internal(connector);
|
||||
}
|
||||
|
||||
static struct drm_connector_funcs nv_connector_funcs = {
|
||||
.destroy = nv_drm_connector_destroy,
|
||||
.reset = drm_atomic_helper_connector_reset,
|
||||
.force = __nv_drm_connector_force,
|
||||
.detect = nv_drm_connector_detect,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
|
||||
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
|
||||
};
|
||||
|
||||
static int nv_drm_connector_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
|
||||
struct nv_drm_encoder *nv_detected_encoder =
|
||||
nv_connector->nv_detected_encoder;
|
||||
NvU32 modeIndex = 0;
|
||||
int count = 0;
|
||||
|
||||
|
||||
if (nv_connector->edid != NULL) {
|
||||
nv_drm_connector_update_edid_property(connector, nv_connector->edid);
|
||||
}
|
||||
|
||||
while (1) {
|
||||
struct drm_display_mode *mode;
|
||||
struct NvKmsKapiDisplayMode displayMode;
|
||||
NvBool valid = 0;
|
||||
NvBool preferredMode = NV_FALSE;
|
||||
int ret;
|
||||
|
||||
ret = nvKms->getDisplayMode(nv_dev->pDevice,
|
||||
nv_detected_encoder->hDisplay,
|
||||
modeIndex++, &displayMode, &valid,
|
||||
&preferredMode);
|
||||
|
||||
if (ret < 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to get mode at modeIndex %d of NvKmsKapiDisplay 0x%08x",
|
||||
modeIndex, nv_detected_encoder->hDisplay);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Is end of mode-list */
|
||||
|
||||
if (ret == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
/* Ignore invalid modes */
|
||||
|
||||
if (!valid) {
|
||||
continue;
|
||||
}
|
||||
|
||||
mode = drm_mode_create(connector->dev);
|
||||
|
||||
if (mode == NULL) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to create mode for NvKmsKapiDisplay 0x%08x",
|
||||
nv_detected_encoder->hDisplay);
|
||||
continue;
|
||||
}
|
||||
|
||||
nvkms_display_mode_to_drm_mode(&displayMode, mode);
|
||||
|
||||
if (preferredMode) {
|
||||
mode->type |= DRM_MODE_TYPE_PREFERRED;
|
||||
}
|
||||
|
||||
/* Add a mode to a connector's probed_mode list */
|
||||
|
||||
drm_mode_probed_add(connector, mode);
|
||||
|
||||
count++;
|
||||
}
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static int nv_drm_connector_mode_valid(struct drm_connector *connector,
|
||||
#if defined(NV_DRM_CONNECTOR_HELPER_FUNCS_MODE_VALID_HAS_CONST_MODE_ARG)
|
||||
const struct drm_display_mode *mode)
|
||||
#else
|
||||
struct drm_display_mode *mode)
|
||||
#endif
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct nv_drm_encoder *nv_detected_encoder =
|
||||
to_nv_connector(connector)->nv_detected_encoder;
|
||||
struct NvKmsKapiDisplayMode displayMode;
|
||||
|
||||
if (nv_detected_encoder == NULL) {
|
||||
return MODE_BAD;
|
||||
}
|
||||
|
||||
drm_mode_to_nvkms_display_mode(mode, &displayMode);
|
||||
|
||||
if (!nvKms->validateDisplayMode(nv_dev->pDevice,
|
||||
nv_detected_encoder->hDisplay,
|
||||
&displayMode)) {
|
||||
return MODE_BAD;
|
||||
}
|
||||
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
static struct drm_encoder*
|
||||
nv_drm_connector_best_encoder(struct drm_connector *connector)
|
||||
{
|
||||
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
|
||||
|
||||
if (nv_connector->nv_detected_encoder != NULL) {
|
||||
return &nv_connector->nv_detected_encoder->base;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
|
||||
static const NvU32 __nv_drm_connector_supported_colorspaces =
|
||||
BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
|
||||
BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
|
||||
#endif
|
||||
|
||||
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
|
||||
static int
|
||||
__nv_drm_connector_atomic_check(struct drm_connector *connector,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_connector_state *new_connector_state =
|
||||
drm_atomic_get_new_connector_state(state, connector);
|
||||
struct drm_connector_state *old_connector_state =
|
||||
drm_atomic_get_old_connector_state(state, connector);
|
||||
struct nv_drm_device *nv_dev = to_nv_device(connector->dev);
|
||||
|
||||
struct drm_crtc *crtc = new_connector_state->crtc;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
struct nv_drm_crtc_state *nv_crtc_state;
|
||||
struct NvKmsKapiHeadRequestedConfig *req_config;
|
||||
|
||||
if (!crtc) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
|
||||
nv_crtc_state = to_nv_crtc_state(crtc_state);
|
||||
req_config = &nv_crtc_state->req_config;
|
||||
|
||||
/*
|
||||
* Override metadata for the entire head instead of allowing NVKMS to derive
|
||||
* it from the layers' metadata.
|
||||
*
|
||||
* This is the metadata that will sent to the display, and if applicable,
|
||||
* layers will be tone mapped to this metadata rather than that of the
|
||||
* display.
|
||||
*/
|
||||
req_config->flags.hdrInfoFrameChanged =
|
||||
!drm_connector_atomic_hdr_metadata_equal(old_connector_state,
|
||||
new_connector_state);
|
||||
if (new_connector_state->hdr_output_metadata &&
|
||||
new_connector_state->hdr_output_metadata->data) {
|
||||
|
||||
/*
|
||||
* Note that HDMI definitions are used here even though we might not
|
||||
* be using HDMI. While that seems odd, it is consistent with
|
||||
* upstream behavior.
|
||||
*/
|
||||
|
||||
struct hdr_output_metadata *hdr_metadata =
|
||||
new_connector_state->hdr_output_metadata->data;
|
||||
struct hdr_metadata_infoframe *info_frame =
|
||||
&hdr_metadata->hdmi_metadata_type1;
|
||||
unsigned int i;
|
||||
|
||||
if (hdr_metadata->metadata_type != HDMI_STATIC_METADATA_TYPE1) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(info_frame->display_primaries); i++) {
|
||||
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].x =
|
||||
info_frame->display_primaries[i].x;
|
||||
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.displayPrimaries[i].y =
|
||||
info_frame->display_primaries[i].y;
|
||||
}
|
||||
|
||||
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.x =
|
||||
info_frame->white_point.x;
|
||||
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.whitePoint.y =
|
||||
info_frame->white_point.y;
|
||||
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxDisplayMasteringLuminance =
|
||||
info_frame->max_display_mastering_luminance;
|
||||
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.minDisplayMasteringLuminance =
|
||||
info_frame->min_display_mastering_luminance;
|
||||
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxCLL =
|
||||
info_frame->max_cll;
|
||||
req_config->modeSetConfig.hdrInfoFrame.staticMetadata.maxFALL =
|
||||
info_frame->max_fall;
|
||||
|
||||
req_config->modeSetConfig.hdrInfoFrame.eotf = info_frame->eotf;
|
||||
|
||||
req_config->modeSetConfig.hdrInfoFrame.enabled = NV_TRUE;
|
||||
} else {
|
||||
req_config->modeSetConfig.hdrInfoFrame.enabled = NV_FALSE;
|
||||
}
|
||||
|
||||
req_config->flags.colorimetryChanged =
|
||||
(old_connector_state->colorspace != new_connector_state->colorspace);
|
||||
// When adding a case here, also add to __nv_drm_connector_supported_colorspaces
|
||||
switch (new_connector_state->colorspace) {
|
||||
case DRM_MODE_COLORIMETRY_DEFAULT:
|
||||
req_config->modeSetConfig.colorimetry =
|
||||
NVKMS_OUTPUT_COLORIMETRY_DEFAULT;
|
||||
break;
|
||||
case DRM_MODE_COLORIMETRY_BT2020_RGB:
|
||||
case DRM_MODE_COLORIMETRY_BT2020_YCC:
|
||||
// Ignore RGB/YCC
|
||||
// See https://patchwork.freedesktop.org/patch/525496/?series=111865&rev=4
|
||||
req_config->modeSetConfig.colorimetry =
|
||||
NVKMS_OUTPUT_COLORIMETRY_BT2100;
|
||||
break;
|
||||
default:
|
||||
// XXX HDR TODO: Add support for more color spaces
|
||||
NV_DRM_DEV_LOG_ERR(nv_dev, "Unsupported color space");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
|
||||
|
||||
static const struct drm_connector_helper_funcs nv_connector_helper_funcs = {
|
||||
.get_modes = nv_drm_connector_get_modes,
|
||||
.mode_valid = nv_drm_connector_mode_valid,
|
||||
.best_encoder = nv_drm_connector_best_encoder,
|
||||
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
|
||||
.atomic_check = __nv_drm_connector_atomic_check,
|
||||
#endif
|
||||
};
|
||||
|
||||
static struct drm_connector*
|
||||
nv_drm_connector_new(struct drm_device *dev,
|
||||
NvU32 physicalIndex, NvKmsConnectorType type,
|
||||
NvBool internal,
|
||||
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH])
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct nv_drm_connector *nv_connector = NULL;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
if ((nv_connector = nv_drm_calloc(1, sizeof(*nv_connector))) == NULL) {
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if ((nv_connector->base.state =
|
||||
nv_drm_calloc(1, sizeof(*nv_connector->base.state))) == NULL) {
|
||||
goto failed_state_alloc;
|
||||
}
|
||||
nv_connector->base.state->connector = &nv_connector->base;
|
||||
|
||||
nv_connector->physicalIndex = physicalIndex;
|
||||
nv_connector->type = type;
|
||||
nv_connector->internal = internal;
|
||||
nv_connector->modeset_permission_filep = NULL;
|
||||
nv_connector->modeset_permission_crtc = NULL;
|
||||
|
||||
strcpy(nv_connector->dpAddress, dpAddress);
|
||||
|
||||
ret = drm_connector_init(
|
||||
dev,
|
||||
&nv_connector->base, &nv_connector_funcs,
|
||||
nvkms_connector_type_to_drm_connector_type(type, internal));
|
||||
|
||||
if (ret != 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to initialize connector created from physical index %u",
|
||||
nv_connector->physicalIndex);
|
||||
goto failed_connector_init;
|
||||
}
|
||||
|
||||
drm_connector_helper_add(&nv_connector->base, &nv_connector_helper_funcs);
|
||||
|
||||
nv_connector->base.polled = DRM_CONNECTOR_POLL_HPD;
|
||||
|
||||
if (nv_connector->type == NVKMS_CONNECTOR_TYPE_VGA) {
|
||||
nv_connector->base.polled =
|
||||
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT)
|
||||
if (nv_connector->type == NVKMS_CONNECTOR_TYPE_HDMI) {
|
||||
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
|
||||
if (drm_mode_create_hdmi_colorspace_property(
|
||||
&nv_connector->base,
|
||||
__nv_drm_connector_supported_colorspaces) == 0) {
|
||||
#else
|
||||
if (drm_mode_create_hdmi_colorspace_property(&nv_connector->base) == 0) {
|
||||
#endif
|
||||
drm_connector_attach_colorspace_property(&nv_connector->base);
|
||||
}
|
||||
drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
|
||||
} else if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DP) {
|
||||
#if defined(NV_DRM_MODE_CREATE_DP_COLORSPACE_PROPERTY_HAS_SUPPORTED_COLORSPACES_ARG)
|
||||
if (drm_mode_create_dp_colorspace_property(
|
||||
&nv_connector->base,
|
||||
__nv_drm_connector_supported_colorspaces) == 0) {
|
||||
#else
|
||||
if (drm_mode_create_dp_colorspace_property(&nv_connector->base) == 0) {
|
||||
#endif
|
||||
drm_connector_attach_colorspace_property(&nv_connector->base);
|
||||
}
|
||||
drm_connector_attach_hdr_output_metadata_property(&nv_connector->base);
|
||||
}
|
||||
#endif /* defined(NV_DRM_CONNECTOR_ATTACH_HDR_OUTPUT_METADATA_PROPERTY_PRESENT) */
|
||||
|
||||
/* Register connector with DRM subsystem */
|
||||
|
||||
ret = drm_connector_register(&nv_connector->base);
|
||||
|
||||
if (ret != 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to register connector created from physical index %u",
|
||||
nv_connector->physicalIndex);
|
||||
goto failed_connector_register;
|
||||
}
|
||||
|
||||
return &nv_connector->base;
|
||||
|
||||
failed_connector_register:
|
||||
drm_connector_cleanup(&nv_connector->base);
|
||||
|
||||
failed_connector_init:
|
||||
nv_drm_free(nv_connector->base.state);
|
||||
|
||||
failed_state_alloc:
|
||||
nv_drm_free(nv_connector);
|
||||
|
||||
failed:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get connector with given physical index one exists. Otherwise, create and
|
||||
* return a new connector.
|
||||
*/
|
||||
struct drm_connector*
|
||||
nv_drm_get_connector(struct drm_device *dev,
|
||||
NvU32 physicalIndex, NvKmsConnectorType type,
|
||||
NvBool internal,
|
||||
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH])
|
||||
{
|
||||
struct drm_connector *connector = NULL;
|
||||
struct drm_connector_list_iter conn_iter;
|
||||
drm_connector_list_iter_begin(dev, &conn_iter);
|
||||
|
||||
/* Lookup for existing connector with same physical index */
|
||||
drm_for_each_connector_iter(connector, &conn_iter) {
|
||||
struct nv_drm_connector *nv_connector = to_nv_connector(connector);
|
||||
|
||||
if (nv_connector->physicalIndex == physicalIndex) {
|
||||
BUG_ON(nv_connector->type != type ||
|
||||
nv_connector->internal != internal);
|
||||
|
||||
if (strcmp(nv_connector->dpAddress, dpAddress) == 0) {
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
}
|
||||
connector = NULL;
|
||||
|
||||
done:
|
||||
drm_connector_list_iter_end(&conn_iter);
|
||||
|
||||
if (!connector) {
|
||||
connector = nv_drm_connector_new(dev,
|
||||
physicalIndex, type, internal,
|
||||
dpAddress);
|
||||
}
|
||||
|
||||
return connector;
|
||||
}
|
||||
|
||||
/*
|
||||
* Revoke the permissions on this connector.
|
||||
*/
|
||||
bool nv_drm_connector_revoke_permissions(struct drm_device *dev,
|
||||
struct nv_drm_connector* nv_connector)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
bool ret = true;
|
||||
|
||||
if (nv_connector->modeset_permission_crtc) {
|
||||
if (nv_connector->nv_detected_encoder) {
|
||||
ret = nvKms->revokePermissions(
|
||||
nv_dev->pDevice, nv_connector->modeset_permission_crtc->head,
|
||||
nv_connector->nv_detected_encoder->hDisplay);
|
||||
}
|
||||
nv_connector->modeset_permission_crtc->modeset_permission_filep = NULL;
|
||||
nv_connector->modeset_permission_crtc = NULL;
|
||||
}
|
||||
nv_connector->modeset_permission_filep = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
104
kernel-open/nvidia-drm/nvidia-drm-connector.h
Normal file
104
kernel-open/nvidia-drm/nvidia-drm-connector.h
Normal file
@@ -0,0 +1,104 @@
|
||||
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_CONNECTOR_H__
|
||||
#define __NVIDIA_DRM_CONNECTOR_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm_connector.h>
|
||||
|
||||
#include "nvtypes.h"
|
||||
#include "nvkms-api-types.h"
|
||||
|
||||
struct nv_drm_connector {
|
||||
NvU32 physicalIndex;
|
||||
|
||||
NvBool internal;
|
||||
NvKmsConnectorType type;
|
||||
|
||||
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH];
|
||||
|
||||
struct nv_drm_encoder *nv_detected_encoder;
|
||||
struct edid *edid;
|
||||
|
||||
atomic_t connection_status_dirty;
|
||||
|
||||
/**
|
||||
* @modeset_permission_filep:
|
||||
*
|
||||
* The filep using this connector with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
|
||||
*/
|
||||
struct drm_file *modeset_permission_filep;
|
||||
|
||||
/**
|
||||
* @modeset_permission_crtc:
|
||||
*
|
||||
* The crtc using this connector with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
|
||||
*/
|
||||
struct nv_drm_crtc *modeset_permission_crtc;
|
||||
|
||||
struct drm_connector base;
|
||||
};
|
||||
|
||||
static inline struct nv_drm_connector *to_nv_connector(
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
if (connector == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
return container_of(connector, struct nv_drm_connector, base);
|
||||
}
|
||||
|
||||
static inline void nv_drm_connector_mark_connection_status_dirty(
|
||||
struct nv_drm_connector *nv_connector)
|
||||
{
|
||||
atomic_cmpxchg(&nv_connector->connection_status_dirty, false, true);
|
||||
}
|
||||
|
||||
static inline bool nv_drm_connector_check_connection_status_dirty_and_clear(
|
||||
struct nv_drm_connector *nv_connector)
|
||||
{
|
||||
return atomic_cmpxchg(
|
||||
&nv_connector->connection_status_dirty,
|
||||
true,
|
||||
false) == true;
|
||||
}
|
||||
|
||||
struct drm_connector*
|
||||
nv_drm_get_connector(struct drm_device *dev,
|
||||
NvU32 physicalIndex, NvKmsConnectorType type,
|
||||
NvBool internal,
|
||||
char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]);
|
||||
|
||||
bool nv_drm_connector_revoke_permissions(struct drm_device *dev,
|
||||
struct nv_drm_connector *nv_connector);
|
||||
|
||||
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#endif /* __NVIDIA_DRM_CONNECTOR_H__ */
|
||||
3131
kernel-open/nvidia-drm/nvidia-drm-crtc.c
Normal file
3131
kernel-open/nvidia-drm/nvidia-drm-crtc.c
Normal file
File diff suppressed because it is too large
Load Diff
354
kernel-open/nvidia-drm/nvidia-drm-crtc.h
Normal file
354
kernel-open/nvidia-drm/nvidia-drm-crtc.h
Normal file
@@ -0,0 +1,354 @@
|
||||
/*
|
||||
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_CRTC_H__
|
||||
#define __NVIDIA_DRM_CRTC_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-helper.h"
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm_crtc.h>
|
||||
|
||||
#include "nvtypes.h"
|
||||
#include "nvkms-kapi.h"
|
||||
|
||||
enum nv_drm_transfer_function {
|
||||
NV_DRM_TRANSFER_FUNCTION_DEFAULT,
|
||||
NV_DRM_TRANSFER_FUNCTION_LINEAR,
|
||||
NV_DRM_TRANSFER_FUNCTION_PQ,
|
||||
NV_DRM_TRANSFER_FUNCTION_MAX,
|
||||
};
|
||||
|
||||
struct nv_drm_crtc {
|
||||
NvU32 head;
|
||||
|
||||
/**
|
||||
* @flip_list:
|
||||
*
|
||||
* List of flips pending to get processed by __nv_drm_handle_flip_event().
|
||||
* Protected by @flip_list_lock.
|
||||
*/
|
||||
struct list_head flip_list;
|
||||
|
||||
/**
|
||||
* @flip_list_lock:
|
||||
*
|
||||
* Spinlock to protect @flip_list.
|
||||
*/
|
||||
spinlock_t flip_list_lock;
|
||||
|
||||
/**
|
||||
* @modeset_permission_filep:
|
||||
*
|
||||
* The filep using this crtc with DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS.
|
||||
*/
|
||||
struct drm_file *modeset_permission_filep;
|
||||
|
||||
struct NvKmsLUTCaps olut_caps;
|
||||
|
||||
struct drm_crtc base;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nv_drm_flip - flip state
|
||||
*
|
||||
* This state is getting used to consume DRM completion event associated
|
||||
* with each crtc state from atomic commit.
|
||||
*
|
||||
* Function nv_drm_atomic_apply_modeset_config() consumes DRM completion
|
||||
* event, save it into flip state associated with crtc and queue flip state into
|
||||
* crtc's flip list and commits atomic update to hardware.
|
||||
*/
|
||||
struct nv_drm_flip {
|
||||
/**
|
||||
* @event:
|
||||
*
|
||||
* Optional pointer to a DRM event to signal upon completion of
|
||||
* the state update.
|
||||
*/
|
||||
struct drm_pending_vblank_event *event;
|
||||
|
||||
/**
|
||||
* @pending_events
|
||||
*
|
||||
* Number of HW events pending to signal completion of the state
|
||||
* update.
|
||||
*/
|
||||
uint32_t pending_events;
|
||||
|
||||
/**
|
||||
* @list_entry:
|
||||
*
|
||||
* Entry on the per-CRTC &nv_drm_crtc.flip_list. Protected by
|
||||
* &nv_drm_crtc.flip_list_lock.
|
||||
*/
|
||||
struct list_head list_entry;
|
||||
|
||||
/**
|
||||
* @deferred_flip_list
|
||||
*
|
||||
* List flip objects whose processing is deferred until processing of
|
||||
* this flip object. Protected by &nv_drm_crtc.flip_list_lock.
|
||||
* nv_drm_atomic_commit() gets last flip object from
|
||||
* nv_drm_crtc:flip_list and add deferred flip objects into
|
||||
* @deferred_flip_list, __nv_drm_handle_flip_event() processes
|
||||
* @deferred_flip_list.
|
||||
*/
|
||||
struct list_head deferred_flip_list;
|
||||
};
|
||||
|
||||
struct nv_drm_crtc_state {
|
||||
/**
|
||||
* @base:
|
||||
*
|
||||
* Base DRM crtc state object for this.
|
||||
*/
|
||||
struct drm_crtc_state base;
|
||||
|
||||
/**
|
||||
* @head_req_config:
|
||||
*
|
||||
* Requested head's modeset configuration corresponding to this crtc state.
|
||||
*/
|
||||
struct NvKmsKapiHeadRequestedConfig req_config;
|
||||
|
||||
/**
|
||||
* @nv_flip:
|
||||
*
|
||||
* Flip state associated with this crtc state, gets allocated
|
||||
* by nv_drm_atomic_crtc_duplicate_state(), on successful commit it gets
|
||||
* consumed and queued into flip list by
|
||||
* nv_drm_atomic_apply_modeset_config() and finally gets destroyed
|
||||
* by __nv_drm_handle_flip_event() after getting processed.
|
||||
*
|
||||
* In case of failure of atomic commit, this flip state getting destroyed by
|
||||
* nv_drm_atomic_crtc_destroy_state().
|
||||
*/
|
||||
struct nv_drm_flip *nv_flip;
|
||||
|
||||
enum nv_drm_transfer_function regamma_tf;
|
||||
struct drm_property_blob *regamma_lut;
|
||||
uint64_t regamma_divisor;
|
||||
struct nv_drm_lut_surface *regamma_drm_lut_surface;
|
||||
NvBool regamma_changed;
|
||||
};
|
||||
|
||||
static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state)
|
||||
{
|
||||
return container_of(state, struct nv_drm_crtc_state, base);
|
||||
}
|
||||
|
||||
static inline const struct nv_drm_crtc_state *to_nv_crtc_state_const(const struct drm_crtc_state *state)
|
||||
{
|
||||
return container_of(state, struct nv_drm_crtc_state, base);
|
||||
}
|
||||
|
||||
struct nv_drm_plane {
|
||||
/**
|
||||
* @base:
|
||||
*
|
||||
* Base DRM plane object for this plane.
|
||||
*/
|
||||
struct drm_plane base;
|
||||
|
||||
/**
|
||||
* @defaultCompositionMode:
|
||||
*
|
||||
* Default composition blending mode of this plane.
|
||||
*/
|
||||
enum NvKmsCompositionBlendingMode defaultCompositionMode;
|
||||
|
||||
/**
|
||||
* @layer_idx
|
||||
*
|
||||
* Index of this plane in the per head array of layers.
|
||||
*/
|
||||
uint32_t layer_idx;
|
||||
|
||||
/**
|
||||
* @supportsColorProperties
|
||||
*
|
||||
* If true, supports the COLOR_ENCODING and COLOR_RANGE properties.
|
||||
*/
|
||||
bool supportsColorProperties;
|
||||
|
||||
struct NvKmsLUTCaps ilut_caps;
|
||||
struct NvKmsLUTCaps tmo_caps;
|
||||
};
|
||||
|
||||
static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane)
|
||||
{
|
||||
if (plane == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
return container_of(plane, struct nv_drm_plane, base);
|
||||
}
|
||||
|
||||
struct nv_drm_nvkms_surface {
|
||||
struct NvKmsKapiDevice *pDevice;
|
||||
struct NvKmsKapiMemory *nvkms_memory;
|
||||
struct NvKmsKapiSurface *nvkms_surface;
|
||||
void *buffer;
|
||||
struct kref refcount;
|
||||
};
|
||||
|
||||
struct nv_drm_nvkms_surface_params {
|
||||
NvU32 width;
|
||||
NvU32 height;
|
||||
size_t surface_size;
|
||||
enum NvKmsSurfaceMemoryFormat format;
|
||||
};
|
||||
|
||||
struct nv_drm_lut_surface {
|
||||
struct nv_drm_nvkms_surface base;
|
||||
struct {
|
||||
NvU32 vssSegments;
|
||||
enum NvKmsLUTVssType vssType;
|
||||
|
||||
NvU32 lutEntries;
|
||||
enum NvKmsLUTFormat entryFormat;
|
||||
|
||||
} properties;
|
||||
};
|
||||
|
||||
struct nv_drm_plane_state {
|
||||
struct drm_plane_state base;
|
||||
s32 __user *fd_user_ptr;
|
||||
enum nv_drm_input_color_space input_colorspace;
|
||||
#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
|
||||
struct drm_property_blob *hdr_output_metadata;
|
||||
#endif
|
||||
struct drm_property_blob *lms_ctm;
|
||||
struct drm_property_blob *lms_to_itp_ctm;
|
||||
struct drm_property_blob *itp_to_lms_ctm;
|
||||
struct drm_property_blob *blend_ctm;
|
||||
|
||||
enum nv_drm_transfer_function degamma_tf;
|
||||
struct drm_property_blob *degamma_lut;
|
||||
uint64_t degamma_multiplier; /* S31.32 Sign-Magnitude Format */
|
||||
struct nv_drm_lut_surface *degamma_drm_lut_surface;
|
||||
NvBool degamma_changed;
|
||||
|
||||
struct drm_property_blob *tmo_lut;
|
||||
struct nv_drm_lut_surface *tmo_drm_lut_surface;
|
||||
NvBool tmo_changed;
|
||||
};
|
||||
|
||||
static inline struct nv_drm_plane_state *to_nv_drm_plane_state(struct drm_plane_state *state)
|
||||
{
|
||||
return container_of(state, struct nv_drm_plane_state, base);
|
||||
}
|
||||
|
||||
static inline const struct nv_drm_plane_state *to_nv_drm_plane_state_const(const struct drm_plane_state *state)
|
||||
{
|
||||
return container_of(state, const struct nv_drm_plane_state, base);
|
||||
}
|
||||
|
||||
static inline struct nv_drm_crtc *to_nv_crtc(struct drm_crtc *crtc)
|
||||
{
|
||||
if (crtc == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
return container_of(crtc, struct nv_drm_crtc, base);
|
||||
}
|
||||
|
||||
/*
|
||||
* CRTCs are static objects, list does not change once after initialization and
|
||||
* before teardown of device. Initialization/teardown paths are single
|
||||
* threaded, so no locking required.
|
||||
*/
|
||||
static inline
|
||||
struct nv_drm_crtc *nv_drm_crtc_lookup(struct nv_drm_device *nv_dev, NvU32 head)
|
||||
{
|
||||
struct drm_crtc *crtc;
|
||||
nv_drm_for_each_crtc(crtc, nv_dev->dev) {
|
||||
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
|
||||
|
||||
if (nv_crtc->head == head) {
|
||||
return nv_crtc;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* nv_drm_crtc_enqueue_flip - Enqueue nv_drm_flip object to flip_list of crtc.
|
||||
*/
|
||||
static inline void nv_drm_crtc_enqueue_flip(struct nv_drm_crtc *nv_crtc,
|
||||
struct nv_drm_flip *nv_flip)
|
||||
{
|
||||
spin_lock(&nv_crtc->flip_list_lock);
|
||||
list_add(&nv_flip->list_entry, &nv_crtc->flip_list);
|
||||
spin_unlock(&nv_crtc->flip_list_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* nv_drm_crtc_dequeue_flip - Dequeue nv_drm_flip object to flip_list of crtc.
|
||||
*/
|
||||
static inline
|
||||
struct nv_drm_flip *nv_drm_crtc_dequeue_flip(struct nv_drm_crtc *nv_crtc)
|
||||
{
|
||||
struct nv_drm_flip *nv_flip = NULL;
|
||||
uint32_t pending_events = 0;
|
||||
|
||||
spin_lock(&nv_crtc->flip_list_lock);
|
||||
nv_flip = list_first_entry_or_null(&nv_crtc->flip_list,
|
||||
struct nv_drm_flip, list_entry);
|
||||
if (likely(nv_flip != NULL)) {
|
||||
/*
|
||||
* Decrement pending_event count and dequeue flip object if
|
||||
* pending_event count becomes 0.
|
||||
*/
|
||||
pending_events = --nv_flip->pending_events;
|
||||
if (!pending_events) {
|
||||
list_del(&nv_flip->list_entry);
|
||||
}
|
||||
}
|
||||
spin_unlock(&nv_crtc->flip_list_lock);
|
||||
|
||||
if (WARN_ON(nv_flip == NULL) || pending_events) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return nv_flip;
|
||||
}
|
||||
|
||||
void nv_drm_enumerate_crtcs_and_planes(
|
||||
struct nv_drm_device *nv_dev,
|
||||
const struct NvKmsKapiDeviceResourcesInfo *pResInfo);
|
||||
|
||||
int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#endif /* __NVIDIA_DRM_CRTC_H__ */
|
||||
2236
kernel-open/nvidia-drm/nvidia-drm-drv.c
Normal file
2236
kernel-open/nvidia-drm/nvidia-drm-drv.c
Normal file
File diff suppressed because it is too large
Load Diff
44
kernel-open/nvidia-drm/nvidia-drm-drv.h
Normal file
44
kernel-open/nvidia-drm/nvidia-drm-drv.h
Normal file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_DRV_H__
|
||||
#define __NVIDIA_DRM_DRV_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
struct NvKmsKapiGpuInfo;
|
||||
|
||||
int nv_drm_probe_devices(void);
|
||||
|
||||
void nv_drm_remove_devices(void);
|
||||
|
||||
void nv_drm_suspend_resume(NvBool suspend);
|
||||
|
||||
void nv_drm_register_drm_device(const struct NvKmsKapiGpuInfo *);
|
||||
|
||||
void nv_drm_update_drm_driver_features(void);
|
||||
|
||||
#endif /* defined(NV_DRM_AVAILABLE) */
|
||||
|
||||
#endif /* __NVIDIA_DRM_DRV_H__ */
|
||||
337
kernel-open/nvidia-drm/nvidia-drm-encoder.c
Normal file
337
kernel-open/nvidia-drm/nvidia-drm-encoder.c
Normal file
@@ -0,0 +1,337 @@
|
||||
/*
|
||||
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-priv.h"
|
||||
#include "nvidia-drm-encoder.h"
|
||||
#include "nvidia-drm-utils.h"
|
||||
#include "nvidia-drm-connector.h"
|
||||
#include "nvidia-drm-crtc.h"
|
||||
#include "nvidia-drm-helper.h"
|
||||
|
||||
#include "nvmisc.h"
|
||||
|
||||
/*
|
||||
* Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
|
||||
* moves a number of helper function definitions from
|
||||
* drm/drm_crtc_helper.h to a new drm_probe_helper.h.
|
||||
*/
|
||||
#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT)
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#endif
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
|
||||
static void nv_drm_encoder_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder);
|
||||
|
||||
drm_encoder_cleanup(encoder);
|
||||
|
||||
nv_drm_free(nv_encoder);
|
||||
}
|
||||
|
||||
static const struct drm_encoder_funcs nv_encoder_funcs = {
|
||||
.destroy = nv_drm_encoder_destroy,
|
||||
};
|
||||
|
||||
static bool nv_drm_encoder_mode_fixup(struct drm_encoder *encoder,
|
||||
const struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static void nv_drm_encoder_prepare(struct drm_encoder *encoder)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
static void nv_drm_encoder_commit(struct drm_encoder *encoder)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
static void nv_drm_encoder_mode_set(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
static const struct drm_encoder_helper_funcs nv_encoder_helper_funcs = {
|
||||
.mode_fixup = nv_drm_encoder_mode_fixup,
|
||||
.prepare = nv_drm_encoder_prepare,
|
||||
.commit = nv_drm_encoder_commit,
|
||||
.mode_set = nv_drm_encoder_mode_set,
|
||||
};
|
||||
|
||||
static uint32_t get_crtc_mask(struct drm_device *dev, uint32_t headMask)
|
||||
{
|
||||
struct drm_crtc *crtc = NULL;
|
||||
uint32_t crtc_mask = 0x0;
|
||||
|
||||
nv_drm_for_each_crtc(crtc, dev) {
|
||||
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
|
||||
|
||||
if (headMask & NVBIT(nv_crtc->head)) {
|
||||
crtc_mask |= drm_crtc_mask(crtc);
|
||||
}
|
||||
}
|
||||
|
||||
return crtc_mask;
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper function to create new encoder for given NvKmsKapiDisplay
|
||||
* with given signal format.
|
||||
*/
|
||||
static struct drm_encoder*
|
||||
nv_drm_encoder_new(struct drm_device *dev,
|
||||
NvKmsKapiDisplay hDisplay,
|
||||
NvKmsConnectorSignalFormat format,
|
||||
unsigned int crtc_mask)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
|
||||
struct nv_drm_encoder *nv_encoder = NULL;
|
||||
|
||||
int ret = 0;
|
||||
|
||||
/* Allocate an NVIDIA encoder object */
|
||||
|
||||
nv_encoder = nv_drm_calloc(1, sizeof(*nv_encoder));
|
||||
|
||||
if (nv_encoder == NULL) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to allocate memory for NVIDIA-DRM encoder object");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
nv_encoder->hDisplay = hDisplay;
|
||||
|
||||
/* Initialize the base encoder object and add it to the drm subsystem */
|
||||
|
||||
ret = drm_encoder_init(dev,
|
||||
&nv_encoder->base, &nv_encoder_funcs,
|
||||
nvkms_connector_signal_to_drm_encoder_signal(format),
|
||||
NULL);
|
||||
if (ret != 0) {
|
||||
nv_drm_free(nv_encoder);
|
||||
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to initialize encoder created from NvKmsKapiDisplay 0x%08x",
|
||||
hDisplay);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
nv_encoder->base.possible_crtcs = crtc_mask;
|
||||
|
||||
drm_encoder_helper_add(&nv_encoder->base, &nv_encoder_helper_funcs);
|
||||
|
||||
return &nv_encoder->base;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add encoder for given NvKmsKapiDisplay
|
||||
*/
|
||||
struct drm_encoder*
|
||||
nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
|
||||
struct NvKmsKapiStaticDisplayInfo *displayInfo = NULL;
|
||||
struct NvKmsKapiConnectorInfo *connectorInfo = NULL;
|
||||
|
||||
struct drm_encoder *encoder = NULL;
|
||||
struct nv_drm_encoder *nv_encoder = NULL;
|
||||
|
||||
struct drm_connector *connector = NULL;
|
||||
|
||||
int ret = 0;
|
||||
|
||||
/* Query NvKmsKapiStaticDisplayInfo and NvKmsKapiConnectorInfo */
|
||||
|
||||
if ((displayInfo = nv_drm_calloc(1, sizeof(*displayInfo))) == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice, hDisplay, displayInfo)) {
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
connectorInfo = nvkms_get_connector_info(nv_dev->pDevice,
|
||||
displayInfo->connectorHandle);
|
||||
|
||||
if (IS_ERR(connectorInfo)) {
|
||||
ret = PTR_ERR(connectorInfo);
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Create and add drm encoder */
|
||||
|
||||
encoder = nv_drm_encoder_new(dev,
|
||||
displayInfo->handle,
|
||||
connectorInfo->signalFormat,
|
||||
get_crtc_mask(dev, displayInfo->headMask));
|
||||
|
||||
if (IS_ERR(encoder)) {
|
||||
ret = PTR_ERR(encoder);
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Get connector from respective physical index */
|
||||
|
||||
connector =
|
||||
nv_drm_get_connector(dev,
|
||||
connectorInfo->physicalIndex,
|
||||
connectorInfo->type,
|
||||
displayInfo->internal, displayInfo->dpAddress);
|
||||
|
||||
if (IS_ERR(connector)) {
|
||||
ret = PTR_ERR(connector);
|
||||
goto failed_connector_encoder_attach;
|
||||
}
|
||||
|
||||
/* Attach encoder and connector */
|
||||
|
||||
ret = nv_drm_connector_attach_encoder(connector, encoder);
|
||||
|
||||
if (ret != 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to attach encoder created from NvKmsKapiDisplay 0x%08x "
|
||||
"to connector",
|
||||
hDisplay);
|
||||
goto failed_connector_encoder_attach;
|
||||
}
|
||||
|
||||
nv_encoder = to_nv_encoder(encoder);
|
||||
|
||||
mutex_lock(&dev->mode_config.mutex);
|
||||
|
||||
nv_encoder->nv_connector = to_nv_connector(connector);
|
||||
|
||||
nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
|
||||
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
|
||||
goto done;
|
||||
|
||||
failed_connector_encoder_attach:
|
||||
|
||||
drm_encoder_cleanup(encoder);
|
||||
|
||||
nv_drm_free(encoder);
|
||||
|
||||
done:
|
||||
|
||||
nv_drm_free(displayInfo);
|
||||
|
||||
nv_drm_free(connectorInfo);
|
||||
|
||||
return ret != 0 ? ERR_PTR(ret) : encoder;
|
||||
}
|
||||
|
||||
static inline struct nv_drm_encoder*
|
||||
get_nv_encoder_from_nvkms_display(struct drm_device *dev,
|
||||
NvKmsKapiDisplay hDisplay)
|
||||
{
|
||||
struct drm_encoder *encoder;
|
||||
|
||||
nv_drm_for_each_encoder(encoder, dev) {
|
||||
struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder);
|
||||
|
||||
if (nv_encoder->hDisplay == hDisplay) {
|
||||
return nv_encoder;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
|
||||
NvKmsKapiDisplay hDisplay)
|
||||
{
|
||||
struct drm_device *dev = nv_dev->dev;
|
||||
struct nv_drm_encoder *nv_encoder = NULL;
|
||||
|
||||
mutex_lock(&dev->mode_config.mutex);
|
||||
|
||||
nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
|
||||
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
|
||||
if (nv_encoder == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
|
||||
|
||||
schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
|
||||
}
|
||||
|
||||
void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
|
||||
NvKmsKapiDisplay hDisplay)
|
||||
{
|
||||
struct drm_device *dev = nv_dev->dev;
|
||||
|
||||
struct drm_encoder *encoder = NULL;
|
||||
struct nv_drm_encoder *nv_encoder = NULL;
|
||||
|
||||
/*
|
||||
* Look for an existing encoder with the same hDisplay and
|
||||
* use it if available.
|
||||
*/
|
||||
|
||||
nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay);
|
||||
|
||||
if (nv_encoder != NULL) {
|
||||
NV_DRM_DEV_LOG_INFO(
|
||||
nv_dev,
|
||||
"Encoder with NvKmsKapiDisplay 0x%08x already exists.",
|
||||
hDisplay);
|
||||
return;
|
||||
}
|
||||
|
||||
encoder = nv_drm_add_encoder(dev, hDisplay);
|
||||
|
||||
if (IS_ERR(encoder)) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to add encoder for NvKmsKapiDisplay 0x%08x",
|
||||
hDisplay);
|
||||
return;
|
||||
}
|
||||
|
||||
schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
|
||||
}
|
||||
#endif
|
||||
64
kernel-open/nvidia-drm/nvidia-drm-encoder.h
Normal file
64
kernel-open/nvidia-drm/nvidia-drm-encoder.h
Normal file
@@ -0,0 +1,64 @@
|
||||
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_ENCODER_H__
|
||||
#define __NVIDIA_DRM_ENCODER_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-priv.h"
|
||||
|
||||
#include <drm/drm_encoder.h>
|
||||
|
||||
#include "nvkms-kapi.h"
|
||||
|
||||
struct nv_drm_encoder {
|
||||
NvKmsKapiDisplay hDisplay;
|
||||
|
||||
struct nv_drm_connector *nv_connector;
|
||||
|
||||
struct drm_encoder base;
|
||||
};
|
||||
|
||||
static inline struct nv_drm_encoder *to_nv_encoder(
|
||||
struct drm_encoder *encoder)
|
||||
{
|
||||
if (encoder == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
return container_of(encoder, struct nv_drm_encoder, base);
|
||||
}
|
||||
|
||||
struct drm_encoder*
|
||||
nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay);
|
||||
|
||||
void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
|
||||
NvKmsKapiDisplay hDisplay);
|
||||
|
||||
void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
|
||||
NvKmsKapiDisplay hDisplay);
|
||||
|
||||
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#endif /* __NVIDIA_DRM_ENCODER_H__ */
|
||||
309
kernel-open/nvidia-drm/nvidia-drm-fb.c
Normal file
309
kernel-open/nvidia-drm/nvidia-drm-fb.c
Normal file
@@ -0,0 +1,309 @@
|
||||
/*
|
||||
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-priv.h"
|
||||
#include "nvidia-drm-ioctl.h"
|
||||
#include "nvidia-drm-fb.h"
|
||||
#include "nvidia-drm-utils.h"
|
||||
#include "nvidia-drm-gem.h"
|
||||
#include "nvidia-drm-helper.h"
|
||||
#include "nvidia-drm-format.h"
|
||||
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
|
||||
static void __nv_drm_framebuffer_free(struct nv_drm_framebuffer *nv_fb)
|
||||
{
|
||||
struct drm_framebuffer *fb = &nv_fb->base;
|
||||
uint32_t i;
|
||||
|
||||
/* Unreference gem object */
|
||||
for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
|
||||
struct drm_gem_object *gem = fb->obj[i];
|
||||
if (gem != NULL) {
|
||||
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
|
||||
nv_drm_gem_object_unreference_unlocked(nv_gem);
|
||||
}
|
||||
}
|
||||
|
||||
/* Free framebuffer */
|
||||
nv_drm_free(nv_fb);
|
||||
}
|
||||
|
||||
static void nv_drm_framebuffer_destroy(struct drm_framebuffer *fb)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(fb->dev);
|
||||
struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb);
|
||||
|
||||
/* Cleaup core framebuffer object */
|
||||
|
||||
drm_framebuffer_cleanup(fb);
|
||||
|
||||
/* Free NvKmsKapiSurface associated with this framebuffer object */
|
||||
|
||||
nvKms->destroySurface(nv_dev->pDevice, nv_fb->pSurface);
|
||||
|
||||
__nv_drm_framebuffer_free(nv_fb);
|
||||
}
|
||||
|
||||
static int
|
||||
nv_drm_framebuffer_create_handle(struct drm_framebuffer *fb,
|
||||
struct drm_file *file, unsigned int *handle)
|
||||
{
|
||||
return nv_drm_gem_handle_create(file,
|
||||
to_nv_gem_object(fb->obj[0]),
|
||||
handle);
|
||||
}
|
||||
|
||||
static struct drm_framebuffer_funcs nv_framebuffer_funcs = {
|
||||
.destroy = nv_drm_framebuffer_destroy,
|
||||
.create_handle = nv_drm_framebuffer_create_handle,
|
||||
};
|
||||
|
||||
static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc(
|
||||
struct nv_drm_device *nv_dev,
|
||||
struct drm_file *file,
|
||||
const struct drm_mode_fb_cmd2 *cmd)
|
||||
{
|
||||
struct nv_drm_framebuffer *nv_fb;
|
||||
struct nv_drm_gem_object *nv_gem;
|
||||
const int num_planes = nv_drm_format_num_planes(cmd->pixel_format);
|
||||
uint32_t i;
|
||||
|
||||
/* Allocate memory for the framebuffer object */
|
||||
nv_fb = nv_drm_calloc(1, sizeof(*nv_fb));
|
||||
|
||||
if (nv_fb == NULL) {
|
||||
NV_DRM_DEV_DEBUG_DRIVER(
|
||||
nv_dev,
|
||||
"Failed to allocate memory for framebuffer object");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
if (num_planes > NVKMS_MAX_PLANES_PER_SURFACE) {
|
||||
NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Unsupported number of planes");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
for (i = 0; i < num_planes; i++) {
|
||||
nv_gem = nv_drm_gem_object_lookup(file, cmd->handles[i]);
|
||||
|
||||
if (nv_gem == NULL) {
|
||||
NV_DRM_DEV_DEBUG_DRIVER(
|
||||
nv_dev,
|
||||
"Failed to find gem object of type nvkms memory");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
nv_fb->base.obj[i] = &nv_gem->base;
|
||||
}
|
||||
|
||||
return nv_fb;
|
||||
|
||||
failed:
|
||||
|
||||
__nv_drm_framebuffer_free(nv_fb);
|
||||
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
|
||||
static int nv_drm_framebuffer_init(struct drm_device *dev,
|
||||
struct nv_drm_framebuffer *nv_fb,
|
||||
enum NvKmsSurfaceMemoryFormat format,
|
||||
bool have_modifier,
|
||||
uint64_t modifier)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct NvKmsKapiCreateSurfaceParams params = { };
|
||||
struct nv_drm_gem_object *nv_gem;
|
||||
struct drm_framebuffer *fb = &nv_fb->base;
|
||||
uint32_t i;
|
||||
int ret;
|
||||
|
||||
/* Initialize the base framebuffer object and add it to drm subsystem */
|
||||
|
||||
ret = drm_framebuffer_init(dev, fb, &nv_framebuffer_funcs);
|
||||
if (ret != 0) {
|
||||
NV_DRM_DEV_DEBUG_DRIVER(
|
||||
nv_dev,
|
||||
"Failed to initialize framebuffer object");
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < NVKMS_MAX_PLANES_PER_SURFACE; i++) {
|
||||
struct drm_gem_object *gem = fb->obj[i];
|
||||
if (gem != NULL) {
|
||||
nv_gem = to_nv_gem_object(gem);
|
||||
|
||||
params.planes[i].memory = nv_gem->pMemory;
|
||||
params.planes[i].offset = fb->offsets[i];
|
||||
params.planes[i].pitch = fb->pitches[i];
|
||||
}
|
||||
}
|
||||
params.height = fb->height;
|
||||
params.width = fb->width;
|
||||
params.format = format;
|
||||
|
||||
if (have_modifier) {
|
||||
params.explicit_layout = true;
|
||||
params.layout = (modifier & 0x10) ?
|
||||
NvKmsSurfaceMemoryLayoutBlockLinear :
|
||||
NvKmsSurfaceMemoryLayoutPitch;
|
||||
|
||||
// See definition of DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D, we are testing
|
||||
// 'c', the lossless compression field of the modifier
|
||||
if (params.layout == NvKmsSurfaceMemoryLayoutBlockLinear &&
|
||||
(modifier >> 23) & 0x7) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Cannot create FB from compressible surface allocation");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
params.log2GobsPerBlockY = modifier & 0xf;
|
||||
} else {
|
||||
params.explicit_layout = false;
|
||||
}
|
||||
|
||||
/*
|
||||
* XXX work around an invalid pitch assumption in DRM.
|
||||
*
|
||||
* The smallest pitch the display hardware allows is 256.
|
||||
*
|
||||
* If a DRM client allocates a 32x32 cursor surface through
|
||||
* DRM_IOCTL_MODE_CREATE_DUMB, we'll correctly round the pitch to 256:
|
||||
*
|
||||
* pitch = round(32width * 4Bpp, 256) = 256
|
||||
*
|
||||
* and then allocate an 8k surface:
|
||||
*
|
||||
* size = pitch * 32height = 8196
|
||||
*
|
||||
* and report the rounded pitch and size back to the client through the
|
||||
* struct drm_mode_create_dumb ioctl params.
|
||||
*
|
||||
* But when the DRM client passes that buffer object handle to
|
||||
* DRM_IOCTL_MODE_CURSOR, the client has no way to specify the pitch. This
|
||||
* path in drm:
|
||||
*
|
||||
* DRM_IOCTL_MODE_CURSOR
|
||||
* drm_mode_cursor_ioctl()
|
||||
* drm_mode_cursor_common()
|
||||
* drm_mode_cursor_universal()
|
||||
*
|
||||
* will implicitly create a framebuffer from the buffer object, and compute
|
||||
* the pitch as width x 32 (without aligning to our minimum pitch).
|
||||
*
|
||||
* Intercept this case and force the pitch back to 256.
|
||||
*/
|
||||
if ((params.width == 32) &&
|
||||
(params.height == 32) &&
|
||||
(params.planes[0].pitch == 128)) {
|
||||
params.planes[0].pitch = 256;
|
||||
}
|
||||
|
||||
/* Create NvKmsKapiSurface */
|
||||
|
||||
nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, ¶ms);
|
||||
if (nv_fb->pSurface == NULL) {
|
||||
NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Failed to create NvKmsKapiSurface");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
drm_framebuffer_cleanup(fb);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
struct drm_framebuffer *nv_drm_framebuffer_create(
|
||||
struct drm_device *dev,
|
||||
struct drm_file *file,
|
||||
const struct drm_mode_fb_cmd2 *cmd)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct nv_drm_framebuffer *nv_fb;
|
||||
uint64_t modifier = 0;
|
||||
int ret;
|
||||
enum NvKmsSurfaceMemoryFormat format;
|
||||
int i;
|
||||
bool have_modifier = false;
|
||||
|
||||
/* Check whether NvKms supports the given pixel format */
|
||||
if (!nv_drm_format_to_nvkms_format(cmd->pixel_format, &format)) {
|
||||
NV_DRM_DEV_DEBUG_DRIVER(
|
||||
nv_dev,
|
||||
"Unsupported drm pixel format 0x%08x", cmd->pixel_format);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
if (cmd->flags & DRM_MODE_FB_MODIFIERS) {
|
||||
have_modifier = true;
|
||||
modifier = cmd->modifier[0];
|
||||
|
||||
for (i = 0; nv_dev->modifiers[i] != DRM_FORMAT_MOD_INVALID; i++) {
|
||||
if (nv_dev->modifiers[i] == modifier) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (nv_dev->modifiers[i] == DRM_FORMAT_MOD_INVALID) {
|
||||
NV_DRM_DEV_DEBUG_DRIVER(
|
||||
nv_dev,
|
||||
"Invalid format modifier for framebuffer object: 0x%016" NvU64_fmtx,
|
||||
modifier);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
nv_fb = nv_drm_framebuffer_alloc(nv_dev, file, cmd);
|
||||
if (IS_ERR(nv_fb)) {
|
||||
return (struct drm_framebuffer *)nv_fb;
|
||||
}
|
||||
|
||||
/* Fill out framebuffer metadata from the userspace fb creation request */
|
||||
|
||||
drm_helper_mode_fill_fb_struct(
|
||||
dev,
|
||||
&nv_fb->base,
|
||||
cmd);
|
||||
|
||||
/*
|
||||
* Finish up FB initialization by creating the backing NVKMS surface and
|
||||
* publishing the DRM fb
|
||||
*/
|
||||
|
||||
ret = nv_drm_framebuffer_init(dev, nv_fb, format, have_modifier, modifier);
|
||||
|
||||
if (ret != 0) {
|
||||
__nv_drm_framebuffer_free(nv_fb);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return &nv_fb->base;
|
||||
}
|
||||
|
||||
#endif
|
||||
60
kernel-open/nvidia-drm/nvidia-drm-fb.h
Normal file
60
kernel-open/nvidia-drm/nvidia-drm-fb.h
Normal file
@@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_FB_H__
|
||||
#define __NVIDIA_DRM_FB_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm_framebuffer.h>
|
||||
|
||||
#include "nvkms-kapi.h"
|
||||
|
||||
struct nv_drm_framebuffer {
|
||||
struct NvKmsKapiSurface *pSurface;
|
||||
|
||||
struct drm_framebuffer base;
|
||||
};
|
||||
|
||||
static inline struct nv_drm_framebuffer *to_nv_framebuffer(
|
||||
struct drm_framebuffer *fb)
|
||||
{
|
||||
if (fb == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
return container_of(fb, struct nv_drm_framebuffer, base);
|
||||
}
|
||||
|
||||
struct drm_framebuffer *nv_drm_framebuffer_create(
|
||||
struct drm_device *dev,
|
||||
struct drm_file *file,
|
||||
const struct drm_mode_fb_cmd2 *cmd);
|
||||
|
||||
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#endif /* __NVIDIA_DRM_FB_H__ */
|
||||
1829
kernel-open/nvidia-drm/nvidia-drm-fence.c
Normal file
1829
kernel-open/nvidia-drm/nvidia-drm-fence.c
Normal file
File diff suppressed because it is too large
Load Diff
60
kernel-open/nvidia-drm/nvidia-drm-fence.h
Normal file
60
kernel-open/nvidia-drm/nvidia-drm-fence.h
Normal file
@@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_PRIME_FENCE_H__
|
||||
#define __NVIDIA_DRM_PRIME_FENCE_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
struct drm_file;
|
||||
struct drm_device;
|
||||
|
||||
int nv_drm_fence_supported_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
int nv_drm_prime_fence_context_create_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
int nv_drm_gem_prime_fence_attach_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
int nv_drm_semsurf_fence_ctx_create_ioctl(struct drm_device *dev,
|
||||
void *data,
|
||||
struct drm_file *filep);
|
||||
|
||||
int nv_drm_semsurf_fence_create_ioctl(struct drm_device *dev,
|
||||
void *data,
|
||||
struct drm_file *filep);
|
||||
|
||||
int nv_drm_semsurf_fence_wait_ioctl(struct drm_device *dev,
|
||||
void *data,
|
||||
struct drm_file *filep);
|
||||
|
||||
int nv_drm_semsurf_fence_attach_ioctl(struct drm_device *dev,
|
||||
void *data,
|
||||
struct drm_file *filep);
|
||||
|
||||
#endif /* NV_DRM_AVAILABLE */
|
||||
|
||||
#endif /* __NVIDIA_DRM_PRIME_FENCE_H__ */
|
||||
209
kernel-open/nvidia-drm/nvidia-drm-format.c
Normal file
209
kernel-open/nvidia-drm/nvidia-drm-format.c
Normal file
@@ -0,0 +1,209 @@
|
||||
/*
|
||||
* Copyright (c) 2019-2025, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/bitmap.h>
|
||||
|
||||
#include "nvidia-drm-format.h"
|
||||
#include "nvidia-drm-os-interface.h"
|
||||
|
||||
static const u32 nvkms_to_drm_format[] = {
|
||||
/* RGB formats */
|
||||
[NvKmsSurfaceMemoryFormatA1R5G5B5] = DRM_FORMAT_ARGB1555,
|
||||
[NvKmsSurfaceMemoryFormatX1R5G5B5] = DRM_FORMAT_XRGB1555,
|
||||
[NvKmsSurfaceMemoryFormatR5G6B5] = DRM_FORMAT_RGB565,
|
||||
[NvKmsSurfaceMemoryFormatA8R8G8B8] = DRM_FORMAT_ARGB8888,
|
||||
[NvKmsSurfaceMemoryFormatX8R8G8B8] = DRM_FORMAT_XRGB8888,
|
||||
[NvKmsSurfaceMemoryFormatX8B8G8R8] = DRM_FORMAT_XBGR8888,
|
||||
[NvKmsSurfaceMemoryFormatA2B10G10R10] = DRM_FORMAT_ABGR2101010,
|
||||
[NvKmsSurfaceMemoryFormatX2B10G10R10] = DRM_FORMAT_XBGR2101010,
|
||||
[NvKmsSurfaceMemoryFormatA8B8G8R8] = DRM_FORMAT_ABGR8888,
|
||||
#if defined(DRM_FORMAT_ABGR16161616)
|
||||
/*
|
||||
* DRM_FORMAT_ABGR16161616 was introduced by Linux kernel commit
|
||||
* ff92ecf575a92 (v5.14).
|
||||
*/
|
||||
[NvKmsSurfaceMemoryFormatR16G16B16A16] = DRM_FORMAT_ABGR16161616,
|
||||
#endif
|
||||
#if defined(DRM_FORMAT_ABGR16161616F)
|
||||
[NvKmsSurfaceMemoryFormatRF16GF16BF16AF16] = DRM_FORMAT_ABGR16161616F,
|
||||
#endif
|
||||
#if defined(DRM_FORMAT_XBGR16161616F)
|
||||
[NvKmsSurfaceMemoryFormatRF16GF16BF16XF16] = DRM_FORMAT_XBGR16161616F,
|
||||
#endif
|
||||
|
||||
[NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422] = DRM_FORMAT_YUYV,
|
||||
[NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422] = DRM_FORMAT_UYVY,
|
||||
|
||||
/* YUV semi-planar formats
|
||||
*
|
||||
* NVKMS YUV semi-planar formats are MSB aligned. Yx__UxVx means
|
||||
* that the UV components are packed like UUUUUVVVVV (MSB to LSB)
|
||||
* and Yx_VxUx means VVVVVUUUUU (MSB to LSB).
|
||||
*/
|
||||
|
||||
/*
|
||||
* 2 plane YCbCr
|
||||
* index 0 = Y plane, [7:0] Y
|
||||
* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
|
||||
* or
|
||||
* index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
|
||||
*/
|
||||
[NvKmsSurfaceMemoryFormatY8___V8U8_N444] = DRM_FORMAT_NV24, /* non-subsampled Cr:Cb plane */
|
||||
[NvKmsSurfaceMemoryFormatY8___U8V8_N444] = DRM_FORMAT_NV42, /* non-subsampled Cb:Cr plane */
|
||||
[NvKmsSurfaceMemoryFormatY8___V8U8_N422] = DRM_FORMAT_NV16, /* 2x1 subsampled Cr:Cb plane */
|
||||
[NvKmsSurfaceMemoryFormatY8___U8V8_N422] = DRM_FORMAT_NV61, /* 2x1 subsampled Cb:Cr plane */
|
||||
[NvKmsSurfaceMemoryFormatY8___V8U8_N420] = DRM_FORMAT_NV12, /* 2x2 subsampled Cr:Cb plane */
|
||||
[NvKmsSurfaceMemoryFormatY8___U8V8_N420] = DRM_FORMAT_NV21, /* 2x2 subsampled Cb:Cr plane */
|
||||
|
||||
#if defined(DRM_FORMAT_P210)
|
||||
/*
|
||||
* 2 plane YCbCr MSB aligned
|
||||
* index 0 = Y plane, [15:0] Y:x [10:6] little endian
|
||||
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
|
||||
*
|
||||
* 2x1 subsampled Cr:Cb plane, 10 bit per channel
|
||||
*/
|
||||
[NvKmsSurfaceMemoryFormatY10___V10U10_N422] = DRM_FORMAT_P210,
|
||||
#endif
|
||||
|
||||
#if defined(DRM_FORMAT_P010)
|
||||
/*
|
||||
* 2 plane YCbCr MSB aligned
|
||||
* index 0 = Y plane, [15:0] Y:x [10:6] little endian
|
||||
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
|
||||
*
|
||||
* 2x2 subsampled Cr:Cb plane 10 bits per channel
|
||||
*/
|
||||
[NvKmsSurfaceMemoryFormatY10___V10U10_N420] = DRM_FORMAT_P010,
|
||||
#endif
|
||||
|
||||
#if defined(DRM_FORMAT_P012)
|
||||
/*
|
||||
* 2 plane YCbCr MSB aligned
|
||||
* index 0 = Y plane, [15:0] Y:x [12:4] little endian
|
||||
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian
|
||||
*
|
||||
* 2x2 subsampled Cr:Cb plane 12 bits per channel
|
||||
*/
|
||||
[NvKmsSurfaceMemoryFormatY12___V12U12_N420] = DRM_FORMAT_P012,
|
||||
#endif
|
||||
};
|
||||
|
||||
bool nv_drm_format_to_nvkms_format(u32 format,
|
||||
enum NvKmsSurfaceMemoryFormat *nvkms_format)
|
||||
{
|
||||
enum NvKmsSurfaceMemoryFormat i;
|
||||
for (i = 0; i < ARRAY_SIZE(nvkms_to_drm_format); i++) {
|
||||
/*
|
||||
* Note nvkms_to_drm_format[] is sparsely populated: it doesn't
|
||||
* handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0
|
||||
* entries when iterating through it.
|
||||
*/
|
||||
if (nvkms_to_drm_format[i] != 0 && nvkms_to_drm_format[i] == format) {
|
||||
*nvkms_format = i;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
uint32_t *nv_drm_format_array_alloc(
|
||||
unsigned int *count,
|
||||
const long unsigned int nvkms_format_mask)
|
||||
{
|
||||
enum NvKmsSurfaceMemoryFormat i;
|
||||
unsigned int max_count = hweight64(nvkms_format_mask);
|
||||
uint32_t *array = nv_drm_calloc(1, sizeof(uint32_t) * max_count);
|
||||
|
||||
if (array == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
*count = 0;
|
||||
for_each_set_bit(i, &nvkms_format_mask,
|
||||
sizeof(nvkms_format_mask) * BITS_PER_BYTE) {
|
||||
|
||||
if (i >= ARRAY_SIZE(nvkms_to_drm_format)) {
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Note nvkms_to_drm_format[] is sparsely populated: it doesn't
|
||||
* handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0
|
||||
* entries when iterating through it.
|
||||
*/
|
||||
if (nvkms_to_drm_format[i] == 0) {
|
||||
continue;
|
||||
}
|
||||
array[(*count)++] = nvkms_to_drm_format[i];
|
||||
}
|
||||
|
||||
if (*count == 0) {
|
||||
nv_drm_free(array);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return array;
|
||||
}
|
||||
|
||||
bool nv_drm_format_is_yuv(u32 format)
|
||||
{
|
||||
#if defined(NV_DRM_FORMAT_INFO_HAS_IS_YUV)
|
||||
const struct drm_format_info *format_info = drm_format_info(format);
|
||||
return (format_info != NULL) && format_info->is_yuv;
|
||||
#else
|
||||
switch (format) {
|
||||
case DRM_FORMAT_YUYV:
|
||||
case DRM_FORMAT_UYVY:
|
||||
|
||||
case DRM_FORMAT_NV24:
|
||||
case DRM_FORMAT_NV42:
|
||||
case DRM_FORMAT_NV16:
|
||||
case DRM_FORMAT_NV61:
|
||||
case DRM_FORMAT_NV12:
|
||||
case DRM_FORMAT_NV21:
|
||||
|
||||
#if defined(DRM_FORMAT_P210)
|
||||
case DRM_FORMAT_P210:
|
||||
#endif
|
||||
#if defined(DRM_FORMAT_P010)
|
||||
case DRM_FORMAT_P010:
|
||||
#endif
|
||||
#if defined(DRM_FORMAT_P012)
|
||||
case DRM_FORMAT_P012:
|
||||
#endif
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
||||
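Illustrative usage sketch, not part of the mirrored source: how the two helpers above are typically combined when building a plane's supported-format list from an NVKMS format mask. The wrapper function name is hypothetical; only nv_drm_format_array_alloc(), nv_drm_format_to_nvkms_format(), and nv_drm_free() come from the file above.

static int example_build_plane_formats(unsigned long nvkms_format_mask)
{
    unsigned int count = 0;
    unsigned int i;
    uint32_t *formats =
        nv_drm_format_array_alloc(&count, nvkms_format_mask);

    if (formats == NULL) {
        /* Either allocation failed or no bit in the mask had a DRM fourcc. */
        return -ENOMEM;
    }

    for (i = 0; i < count; i++) {
        enum NvKmsSurfaceMemoryFormat nvkms_format;

        /*
         * Every fourcc returned by nv_drm_format_array_alloc() comes from
         * nvkms_to_drm_format[], so the reverse lookup is expected to succeed.
         */
        (void)nv_drm_format_to_nvkms_format(formats[i], &nvkms_format);
    }

    nv_drm_free(formats);
    return 0;
}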
45  kernel-open/nvidia-drm/nvidia-drm-format.h  Normal file
@@ -0,0 +1,45 @@
/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVIDIA_DRM_FORMAT_H__
#define __NVIDIA_DRM_FORMAT_H__

#include "nvidia-drm-conftest.h"

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

#include <drm/drm_fourcc.h>

#include "nvkms-format.h"

bool nv_drm_format_to_nvkms_format(u32 format,
                                   enum NvKmsSurfaceMemoryFormat *nvkms_format);

uint32_t *nv_drm_format_array_alloc(
    unsigned int *count,
    const long unsigned int nvkms_format_mask);

bool nv_drm_format_is_yuv(u32 format);

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

#endif /* __NVIDIA_DRM_FORMAT_H__ */
256  kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c  Normal file
@@ -0,0 +1,256 @@
/*
|
||||
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#include <drm/drm_prime.h>
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm_drv.h>
|
||||
|
||||
#include "nvidia-drm-gem-dma-buf.h"
|
||||
#include "nvidia-drm-ioctl.h"
|
||||
|
||||
#include "linux/dma-buf.h"
|
||||
|
||||
static inline
|
||||
void __nv_drm_gem_dma_buf_free(struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = nv_gem->nv_dev;
|
||||
struct nv_drm_gem_dma_buf *nv_dma_buf = to_nv_dma_buf(nv_gem);
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
if (nv_dma_buf->base.pMemory) {
|
||||
/* Free NvKmsKapiMemory handle associated with this gem object */
|
||||
nvKms->freeMemory(nv_dev->pDevice, nv_dma_buf->base.pMemory);
|
||||
}
|
||||
#endif
|
||||
|
||||
drm_prime_gem_destroy(&nv_gem->base, nv_dma_buf->sgt);
|
||||
|
||||
nv_drm_free(nv_dma_buf);
|
||||
}
|
||||
|
||||
static int __nv_drm_gem_dma_buf_create_mmap_offset(
|
||||
struct nv_drm_device *nv_dev,
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
uint64_t *offset)
|
||||
{
|
||||
(void)nv_dev;
|
||||
return nv_drm_gem_create_mmap_offset(nv_gem, offset);
|
||||
}
|
||||
|
||||
static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
#if defined(NV_LINUX)
|
||||
struct dma_buf_attachment *attach = nv_gem->base.import_attach;
|
||||
struct dma_buf *dma_buf = attach->dmabuf;
|
||||
#endif
|
||||
struct file *old_file;
|
||||
int ret;
|
||||
|
||||
/* check if buffer supports mmap */
|
||||
#if defined(NV_BSD)
|
||||
/*
|
||||
* Most of the FreeBSD DRM code refers to struct file*, which is actually
|
||||
* a struct linux_file*. The dmabuf code in FreeBSD is not actually plumbed
|
||||
* through the same linuxkpi bits it seems (probably so it can be used
|
||||
* elsewhere), so dma_buf->file really is a native FreeBSD struct file...
|
||||
*/
|
||||
if (!nv_gem->base.filp->f_op->mmap)
|
||||
return -EINVAL;
|
||||
|
||||
/* readjust the vma */
|
||||
get_file(nv_gem->base.filp);
|
||||
old_file = vma->vm_file;
|
||||
vma->vm_file = nv_gem->base.filp;
|
||||
vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);
|
||||
|
||||
ret = nv_gem->base.filp->f_op->mmap(nv_gem->base.filp, vma);
|
||||
|
||||
if (ret) {
|
||||
/* restore old parameters on failure */
|
||||
vma->vm_file = old_file;
|
||||
vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node);
|
||||
fput(nv_gem->base.filp);
|
||||
} else {
|
||||
if (old_file)
|
||||
fput(old_file);
|
||||
}
|
||||
#else
|
||||
if (!dma_buf->file->f_op->mmap)
|
||||
return -EINVAL;
|
||||
|
||||
/* readjust the vma */
|
||||
get_file(dma_buf->file);
|
||||
old_file = vma->vm_file;
|
||||
vma->vm_file = dma_buf->file;
|
||||
vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);
|
||||
|
||||
ret = dma_buf->file->f_op->mmap(dma_buf->file, vma);
|
||||
|
||||
if (ret) {
|
||||
/* restore old parameters on failure */
|
||||
vma->vm_file = old_file;
|
||||
vma->vm_pgoff += drm_vma_node_start(&nv_gem->base.vma_node);
|
||||
fput(dma_buf->file);
|
||||
} else {
|
||||
if (old_file)
|
||||
fput(old_file);
|
||||
}
|
||||
#endif
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops = {
|
||||
.free = __nv_drm_gem_dma_buf_free,
|
||||
.create_mmap_offset = __nv_drm_gem_dma_buf_create_mmap_offset,
|
||||
.mmap = __nv_drm_gem_dma_buf_mmap,
|
||||
};
|
||||
|
||||
struct drm_gem_object*
|
||||
nv_drm_gem_prime_import_sg_table(struct drm_device *dev,
|
||||
struct dma_buf_attachment *attach,
|
||||
struct sg_table *sgt)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct dma_buf *dma_buf = attach->dmabuf;
|
||||
struct nv_drm_gem_dma_buf *nv_dma_buf;
|
||||
struct NvKmsKapiMemory *pMemory;
|
||||
|
||||
if ((nv_dma_buf =
|
||||
nv_drm_calloc(1, sizeof(*nv_dma_buf))) == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// dma_buf->size must be a multiple of PAGE_SIZE
|
||||
BUG_ON(dma_buf->size % PAGE_SIZE);
|
||||
|
||||
pMemory = NULL;
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
pMemory = nvKms->getSystemMemoryHandleFromDmaBuf(nv_dev->pDevice,
|
||||
(NvP64)(NvUPtr)dma_buf,
|
||||
dma_buf->size - 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
nv_drm_gem_object_init(nv_dev, &nv_dma_buf->base,
|
||||
&__nv_gem_dma_buf_ops, dma_buf->size, pMemory);
|
||||
|
||||
nv_dma_buf->sgt = sgt;
|
||||
|
||||
return &nv_dma_buf->base.base;
|
||||
}
|
||||
|
||||
int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct drm_nvidia_gem_export_dmabuf_memory_params *p = data;
|
||||
struct nv_drm_gem_dma_buf *nv_dma_buf = NULL;
|
||||
int ret = 0;
|
||||
struct NvKmsKapiMemory *pTmpMemory = NULL;
|
||||
|
||||
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (p->__pad != 0) {
|
||||
ret = -EINVAL;
|
||||
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
|
||||
goto done;
|
||||
}
|
||||
|
||||
if ((nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(
|
||||
filep, p->handle)) == NULL) {
|
||||
ret = -EINVAL;
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to lookup DMA-BUF GEM object for export: 0x%08x",
|
||||
p->handle);
|
||||
goto done;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
if (!nv_dma_buf->base.pMemory) {
|
||||
/*
|
||||
* Get RM system memory handle from SGT - RM will take a reference
|
||||
* on this GEM object to prevent the DMA-BUF from being unpinned
|
||||
* prematurely.
|
||||
*/
|
||||
pTmpMemory = nvKms->getSystemMemoryHandleFromSgt(
|
||||
nv_dev->pDevice,
|
||||
(NvP64)(NvUPtr)nv_dma_buf->sgt,
|
||||
(NvP64)(NvUPtr)&nv_dma_buf->base.base,
|
||||
nv_dma_buf->base.base.size - 1);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
if (!nv_dma_buf->base.pMemory && !pTmpMemory) {
|
||||
ret = -ENOMEM;
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to get memory to export from DMA-BUF GEM object: 0x%08x",
|
||||
p->handle);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!nvKms->exportMemory(nv_dev->pDevice,
|
||||
nv_dma_buf->base.pMemory ?
|
||||
nv_dma_buf->base.pMemory : pTmpMemory,
|
||||
p->nvkms_params_ptr,
|
||||
p->nvkms_params_size)) {
|
||||
ret = -EINVAL;
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to export memory from DMA-BUF GEM object: 0x%08x",
|
||||
p->handle);
|
||||
goto done;
|
||||
}
|
||||
|
||||
done:
|
||||
if (pTmpMemory) {
|
||||
/*
|
||||
* Release reference on RM system memory to prevent circular
|
||||
* refcounting. Another refcount will still be held by RM FD.
|
||||
*/
|
||||
nvKms->freeMemory(nv_dev->pDevice, pTmpMemory);
|
||||
}
|
||||
|
||||
if (nv_dma_buf != NULL) {
|
||||
nv_drm_gem_object_unreference_unlocked(&nv_dma_buf->base);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
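The mmap path above services mappings of imported GEM objects by retargeting the vma at the exporter's file. A minimal sketch of that forwarding pattern, reduced to the Linux branch; the function name is hypothetical and this is an illustration of the technique, not a replacement for the code above.

/*
 * Sketch only: forward an mmap of an imported GEM object to the dma-buf
 * exporter's file, so the exporter's own mmap/fault handlers back the mapping.
 */
static int example_forward_mmap_to_dma_buf(struct dma_buf *dma_buf,
                                           struct drm_gem_object *gem,
                                           struct vm_area_struct *vma)
{
    struct file *old_file;
    int ret;

    if (!dma_buf->file->f_op->mmap)
        return -EINVAL;

    get_file(dma_buf->file);                  /* hold the exporter's file */
    old_file = vma->vm_file;
    vma->vm_file = dma_buf->file;             /* retarget the mapping */
    vma->vm_pgoff -= drm_vma_node_start(&gem->vma_node);

    ret = dma_buf->file->f_op->mmap(dma_buf->file, vma);
    if (ret) {
        /* restore the original vma parameters on failure */
        vma->vm_file = old_file;
        vma->vm_pgoff += drm_vma_node_start(&gem->vma_node);
        fput(dma_buf->file);
    } else if (old_file) {
        fput(old_file);
    }

    return ret;
}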
75  kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h  Normal file
@@ -0,0 +1,75 @@
/*
|
||||
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_GEM_DMA_BUF_H__
|
||||
#define __NVIDIA_DRM_GEM_DMA_BUF_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-gem.h"
|
||||
|
||||
struct nv_drm_gem_dma_buf {
|
||||
struct nv_drm_gem_object base;
|
||||
struct sg_table *sgt;
|
||||
};
|
||||
|
||||
extern const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops;
|
||||
|
||||
static inline struct nv_drm_gem_dma_buf *to_nv_dma_buf(
|
||||
struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
if (nv_gem != NULL) {
|
||||
return container_of(nv_gem, struct nv_drm_gem_dma_buf, base);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct nv_drm_gem_dma_buf *nv_drm_gem_object_dma_buf_lookup(
|
||||
struct drm_file *filp,
|
||||
u32 handle)
|
||||
{
|
||||
struct nv_drm_gem_object *nv_gem =
|
||||
nv_drm_gem_object_lookup(filp, handle);
|
||||
|
||||
if (nv_gem != NULL && nv_gem->ops != &__nv_gem_dma_buf_ops) {
|
||||
nv_drm_gem_object_unreference_unlocked(nv_gem);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return to_nv_dma_buf(nv_gem);
|
||||
}
|
||||
|
||||
struct drm_gem_object*
|
||||
nv_drm_gem_prime_import_sg_table(struct drm_device *dev,
|
||||
struct dma_buf_attachment *attach,
|
||||
struct sg_table *sgt);
|
||||
|
||||
int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __NVIDIA_DRM_GEM_DMA_BUF_H__ */
|
||||
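A minimal sketch of how the typed lookup helper above is meant to be used from an ioctl handler, mirroring nv_drm_gem_export_dmabuf_memory_ioctl in the previous file; the wrapper function name is hypothetical.

/*
 * Sketch only: nv_drm_gem_object_dma_buf_lookup() returns a referenced object
 * (or NULL if the handle is absent or wraps a different GEM type), and the
 * caller must drop that reference when done.
 */
static int example_with_dma_buf_gem(struct drm_file *filep, u32 handle)
{
    struct nv_drm_gem_dma_buf *nv_dma_buf =
        nv_drm_gem_object_dma_buf_lookup(filep, handle);

    if (nv_dma_buf == NULL) {
        return -EINVAL;
    }

    /* ... operate on nv_dma_buf->base.pMemory or nv_dma_buf->sgt ... */

    nv_drm_gem_object_unreference_unlocked(&nv_dma_buf->base);
    return 0;
}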
643  kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c  Normal file
@@ -0,0 +1,643 @@
/*
|
||||
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-gem-nvkms-memory.h"
|
||||
#include "nvidia-drm-helper.h"
|
||||
#include "nvidia-drm-ioctl.h"
|
||||
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_prime.h>
|
||||
|
||||
#include <linux/io.h>
|
||||
#if defined(NV_BSD)
|
||||
#include <vm/vm_pageout.h>
|
||||
#endif
|
||||
|
||||
static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = nv_gem->nv_dev;
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
||||
to_nv_nvkms_memory(nv_gem);
|
||||
|
||||
if (nv_nvkms_memory->physically_mapped) {
|
||||
if (nv_nvkms_memory->pWriteCombinedIORemapAddress != NULL) {
|
||||
iounmap(nv_nvkms_memory->pWriteCombinedIORemapAddress);
|
||||
}
|
||||
|
||||
nvKms->unmapMemory(nv_dev->pDevice,
|
||||
nv_nvkms_memory->base.pMemory,
|
||||
NVKMS_KAPI_MAPPING_TYPE_USER,
|
||||
nv_nvkms_memory->pPhysicalAddress);
|
||||
}
|
||||
|
||||
if (nv_nvkms_memory->pages_count != 0) {
|
||||
nvKms->freeMemoryPages((NvU64 *)nv_nvkms_memory->pages);
|
||||
}
|
||||
|
||||
/* Free NvKmsKapiMemory handle associated with this gem object */
|
||||
|
||||
nvKms->freeMemory(nv_dev->pDevice, nv_nvkms_memory->base.pMemory);
|
||||
|
||||
nv_drm_free(nv_nvkms_memory);
|
||||
}
|
||||
|
||||
static int __nv_drm_gem_nvkms_map(
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory);
|
||||
|
||||
static int __nv_drm_gem_nvkms_mmap(struct nv_drm_gem_object *nv_gem,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
||||
to_nv_nvkms_memory(nv_gem);
|
||||
|
||||
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return drm_gem_mmap_obj(&nv_gem->base,
|
||||
drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
|
||||
}
|
||||
|
||||
static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault(
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf)
|
||||
{
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
||||
to_nv_nvkms_memory(nv_gem);
|
||||
unsigned long address = vmf->address;
|
||||
struct drm_gem_object *gem = vma->vm_private_data;
|
||||
unsigned long page_offset, pfn;
|
||||
vm_fault_t ret;
|
||||
|
||||
page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
|
||||
|
||||
if (nv_nvkms_memory->pages_count == 0) {
|
||||
pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress;
|
||||
pfn >>= PAGE_SHIFT;
|
||||
#if defined(NV_LINUX)
|
||||
/*
|
||||
* FreeBSD doesn't set pgoff. We instead have pfn be the base physical
|
||||
* address, and we will calculate the index pidx from the virtual address.
|
||||
*
|
||||
* This only works because linux_cdev_pager_populate passes the pidx as
|
||||
* vmf->virtual_address. Then we turn the virtual address
|
||||
* into a physical page number.
|
||||
*/
|
||||
pfn += page_offset;
|
||||
#endif
|
||||
} else {
|
||||
BUG_ON(page_offset >= nv_nvkms_memory->pages_count);
|
||||
pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]);
|
||||
}
|
||||
|
||||
#if defined(NV_VMF_INSERT_PFN_PRESENT)
|
||||
ret = vmf_insert_pfn(vma, address, pfn);
|
||||
#else
|
||||
ret = vm_insert_pfn(vma, address, pfn);
|
||||
switch (ret) {
|
||||
case 0:
|
||||
case -EBUSY:
|
||||
/*
|
||||
* EBUSY indicates that another thread already handled
|
||||
* the faulted range.
|
||||
*/
|
||||
ret = VM_FAULT_NOPAGE;
|
||||
break;
|
||||
case -ENOMEM:
|
||||
ret = VM_FAULT_OOM;
|
||||
break;
|
||||
default:
|
||||
WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
|
||||
ret = VM_FAULT_SIGBUS;
|
||||
break;
|
||||
}
|
||||
#endif /* defined(NV_VMF_INSERT_PFN_PRESENT) */
|
||||
return ret;
|
||||
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
|
||||
struct drm_device *dev,
|
||||
const struct nv_drm_gem_object *nv_gem_src);
|
||||
|
||||
static int __nv_drm_gem_nvkms_map(
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory)
|
||||
{
|
||||
int ret = 0;
|
||||
struct nv_drm_device *nv_dev = nv_nvkms_memory->base.nv_dev;
|
||||
struct NvKmsKapiMemory *pMemory = nv_nvkms_memory->base.pMemory;
|
||||
|
||||
mutex_lock(&nv_nvkms_memory->map_lock);
|
||||
|
||||
if (nv_nvkms_memory->physically_mapped) {
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!nvKms->isVidmem(pMemory)) {
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!nvKms->mapMemory(nv_dev->pDevice,
|
||||
pMemory,
|
||||
NVKMS_KAPI_MAPPING_TYPE_USER,
|
||||
&nv_nvkms_memory->pPhysicalAddress)) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to map NvKmsKapiMemory 0x%p",
|
||||
pMemory);
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc(
|
||||
(uintptr_t)nv_nvkms_memory->pPhysicalAddress,
|
||||
nv_nvkms_memory->base.base.size);
|
||||
|
||||
if (!nv_nvkms_memory->pWriteCombinedIORemapAddress) {
|
||||
NV_DRM_DEV_LOG_INFO(
|
||||
nv_dev,
|
||||
"Failed to ioremap_wc NvKmsKapiMemory 0x%p",
|
||||
pMemory);
|
||||
}
|
||||
|
||||
nv_nvkms_memory->physically_mapped = true;
|
||||
|
||||
done:
|
||||
mutex_unlock(&nv_nvkms_memory->map_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void *__nv_drm_gem_nvkms_prime_vmap(
|
||||
struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
||||
to_nv_nvkms_memory(nv_gem);
|
||||
|
||||
int ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
|
||||
if (ret) {
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
if (nv_nvkms_memory->physically_mapped) {
|
||||
return nv_nvkms_memory->pWriteCombinedIORemapAddress;
|
||||
}
|
||||
|
||||
/*
|
||||
* If this buffer isn't physically mapped, it might be backed by struct
|
||||
* pages. Use vmap in that case. Do a noncached mapping for system memory
|
||||
* as display is non io-coherent device in case of Tegra.
|
||||
*/
|
||||
if (nv_nvkms_memory->pages_count > 0) {
|
||||
return nv_drm_vmap(nv_nvkms_memory->pages,
|
||||
nv_nvkms_memory->pages_count,
|
||||
false);
|
||||
}
|
||||
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
static void __nv_drm_gem_nvkms_prime_vunmap(
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
void *address)
|
||||
{
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
||||
to_nv_nvkms_memory(nv_gem);
|
||||
|
||||
if (!nv_nvkms_memory->physically_mapped &&
|
||||
nv_nvkms_memory->pages_count > 0) {
|
||||
nv_drm_vunmap(address);
|
||||
}
|
||||
}
|
||||
|
||||
static int __nv_drm_gem_map_nvkms_memory_offset(
|
||||
struct nv_drm_device *nv_dev,
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
uint64_t *offset)
|
||||
{
|
||||
return nv_drm_gem_create_mmap_offset(nv_gem, offset);
|
||||
}
|
||||
|
||||
static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table(
|
||||
struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = nv_gem->nv_dev;
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory =
|
||||
to_nv_nvkms_memory(nv_gem);
|
||||
struct sg_table *sg_table;
|
||||
|
||||
if (nv_nvkms_memory->pages_count == 0) {
|
||||
NV_DRM_DEV_DEBUG_DRIVER(
|
||||
nv_dev,
|
||||
"Cannot create sg_table for NvKmsKapiMemory 0x%p",
|
||||
nv_gem->pMemory);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
sg_table = nv_drm_prime_pages_to_sg(nv_dev->dev,
|
||||
nv_nvkms_memory->pages,
|
||||
nv_nvkms_memory->pages_count);
|
||||
|
||||
return sg_table;
|
||||
}
|
||||
|
||||
const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = {
|
||||
.free = __nv_drm_gem_nvkms_memory_free,
|
||||
.prime_dup = __nv_drm_gem_nvkms_prime_dup,
|
||||
.prime_vmap = __nv_drm_gem_nvkms_prime_vmap,
|
||||
.prime_vunmap = __nv_drm_gem_nvkms_prime_vunmap,
|
||||
.mmap = __nv_drm_gem_nvkms_mmap,
|
||||
.handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault,
|
||||
.create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset,
|
||||
.prime_get_sg_table = __nv_drm_gem_nvkms_memory_prime_get_sg_table,
|
||||
};
|
||||
|
||||
static int __nv_drm_nvkms_gem_obj_init(
|
||||
struct nv_drm_device *nv_dev,
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory,
|
||||
struct NvKmsKapiMemory *pMemory,
|
||||
uint64_t size)
|
||||
{
|
||||
NvU64 *pages = NULL;
|
||||
NvU32 numPages = 0;
|
||||
|
||||
if ((size % PAGE_SIZE) != 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"NvKmsKapiMemory 0x%p size should be in a multiple of page size to "
|
||||
"create a gem object",
|
||||
pMemory);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_init(&nv_nvkms_memory->map_lock);
|
||||
nv_nvkms_memory->pPhysicalAddress = NULL;
|
||||
nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL;
|
||||
nv_nvkms_memory->physically_mapped = false;
|
||||
|
||||
if (!nvKms->isVidmem(pMemory) &&
|
||||
!nvKms->getMemoryPages(nv_dev->pDevice,
|
||||
pMemory,
|
||||
&pages,
|
||||
&numPages)) {
|
||||
/* GetMemoryPages will fail for vidmem allocations,
|
||||
* but it should not fail for sysmem allocations. */
|
||||
NV_DRM_DEV_LOG_ERR(nv_dev,
|
||||
"Failed to get memory pages for NvKmsKapiMemory 0x%p",
|
||||
pMemory);
|
||||
return -ENOMEM;
|
||||
}
|
||||
nv_nvkms_memory->pages_count = numPages;
|
||||
nv_nvkms_memory->pages = (struct page **)pages;
|
||||
|
||||
nv_drm_gem_object_init(nv_dev,
|
||||
&nv_nvkms_memory->base,
|
||||
&nv_gem_nvkms_memory_ops,
|
||||
size,
|
||||
pMemory);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int nv_drm_dumb_create(
|
||||
struct drm_file *file_priv,
|
||||
struct drm_device *dev, struct drm_mode_create_dumb *args)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
|
||||
uint8_t compressible = 0;
|
||||
struct NvKmsKapiMemory *pMemory;
|
||||
struct NvKmsKapiAllocateMemoryParams allocParams = { };
|
||||
int ret = 0;
|
||||
|
||||
args->pitch = roundup(args->width * ((args->bpp + 7) >> 3),
|
||||
nv_dev->pitchAlignment);
|
||||
|
||||
args->size = args->height * args->pitch;
|
||||
|
||||
/* Core DRM requires gem object size to be aligned with PAGE_SIZE */
|
||||
|
||||
args->size = roundup(args->size, PAGE_SIZE);
|
||||
|
||||
if ((nv_nvkms_memory =
|
||||
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
allocParams.layout = NvKmsSurfaceMemoryLayoutPitch;
|
||||
allocParams.type = NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT;
|
||||
allocParams.size = args->size;
|
||||
allocParams.noDisplayCaching = true;
|
||||
allocParams.useVideoMemory = nv_dev->hasVideoMemory;
|
||||
allocParams.compressible = &compressible;
|
||||
|
||||
pMemory = nvKms->allocateMemory(nv_dev->pDevice, &allocParams);
|
||||
if (pMemory == NULL) {
|
||||
ret = -ENOMEM;
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to allocate NvKmsKapiMemory for dumb object of size %" NvU64_fmtu,
|
||||
args->size);
|
||||
goto nvkms_alloc_memory_failed;
|
||||
}
|
||||
|
||||
ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, args->size);
|
||||
if (ret) {
|
||||
goto nvkms_gem_obj_init_failed;
|
||||
}
|
||||
|
||||
/* Always map dumb buffer memory up front. Clients are only expected
|
||||
* to use dumb buffers for software rendering, so they're not much use
|
||||
* without a CPU mapping.
|
||||
*/
|
||||
ret = __nv_drm_gem_nvkms_map(nv_nvkms_memory);
|
||||
if (ret) {
|
||||
nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return nv_drm_gem_handle_create_drop_reference(file_priv,
|
||||
&nv_nvkms_memory->base,
|
||||
&args->handle);
|
||||
|
||||
nvkms_gem_obj_init_failed:
|
||||
nvKms->freeMemory(nv_dev->pDevice, pMemory);
|
||||
|
||||
nvkms_alloc_memory_failed:
|
||||
nv_drm_free(nv_nvkms_memory);
|
||||
|
||||
fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct drm_nvidia_gem_import_nvkms_memory_params *p = data;
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
|
||||
struct NvKmsKapiMemory *pMemory;
|
||||
int ret;
|
||||
|
||||
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = -EOPNOTSUPP;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if ((nv_nvkms_memory =
|
||||
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
pMemory = nvKms->importMemory(nv_dev->pDevice,
|
||||
p->mem_size,
|
||||
p->nvkms_params_ptr,
|
||||
p->nvkms_params_size);
|
||||
|
||||
if (pMemory == NULL) {
|
||||
ret = -EINVAL;
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to import NVKMS memory to GEM object");
|
||||
goto nvkms_import_memory_failed;
|
||||
}
|
||||
|
||||
ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, p->mem_size);
|
||||
if (ret) {
|
||||
goto nvkms_gem_obj_init_failed;
|
||||
}
|
||||
|
||||
return nv_drm_gem_handle_create_drop_reference(filep,
|
||||
&nv_nvkms_memory->base,
|
||||
&p->handle);
|
||||
nvkms_gem_obj_init_failed:
|
||||
nvKms->freeMemory(nv_dev->pDevice, pMemory);
|
||||
|
||||
nvkms_import_memory_failed:
|
||||
nv_drm_free(nv_nvkms_memory);
|
||||
|
||||
failed:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct drm_nvidia_gem_export_nvkms_memory_params *p = data;
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
|
||||
int ret = 0;
|
||||
|
||||
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = -EOPNOTSUPP;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (p->__pad != 0) {
|
||||
ret = -EINVAL;
|
||||
NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed");
|
||||
goto done;
|
||||
}
|
||||
|
||||
if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
|
||||
filep,
|
||||
p->handle)) == NULL) {
|
||||
ret = -EINVAL;
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to lookup NVKMS gem object for export: 0x%08x",
|
||||
p->handle);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!nvKms->exportMemory(nv_dev->pDevice,
|
||||
nv_nvkms_memory->base.pMemory,
|
||||
p->nvkms_params_ptr,
|
||||
p->nvkms_params_size)) {
|
||||
ret = -EINVAL;
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to export memory from NVKMS GEM object: 0x%08x", p->handle);
|
||||
goto done;
|
||||
}
|
||||
|
||||
done:
|
||||
if (nv_nvkms_memory != NULL) {
|
||||
nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct drm_nvidia_gem_alloc_nvkms_memory_params *p = data;
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL;
|
||||
struct NvKmsKapiMemory *pMemory;
|
||||
struct NvKmsKapiAllocateMemoryParams allocParams = { };
|
||||
int ret = 0;
|
||||
|
||||
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = -EOPNOTSUPP;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if ((p->__pad0 != 0) || (p->__pad1 != 0)) {
|
||||
ret = -EINVAL;
|
||||
NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field");
|
||||
goto failed;
|
||||
}
|
||||
|
||||
if ((nv_nvkms_memory =
|
||||
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
allocParams.layout = p->block_linear ?
|
||||
NvKmsSurfaceMemoryLayoutBlockLinear : NvKmsSurfaceMemoryLayoutPitch;
|
||||
allocParams.type = (p->flags & NV_GEM_ALLOC_NO_SCANOUT) ?
|
||||
NVKMS_KAPI_ALLOCATION_TYPE_OFFSCREEN : NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT;
|
||||
allocParams.size = p->memory_size;
|
||||
allocParams.useVideoMemory = nv_dev->hasVideoMemory;
|
||||
allocParams.compressible = &p->compressible;
|
||||
|
||||
pMemory = nvKms->allocateMemory(nv_dev->pDevice, &allocParams);
|
||||
if (pMemory == NULL) {
|
||||
ret = -EINVAL;
|
||||
NV_DRM_DEV_LOG_ERR(nv_dev,
|
||||
"Failed to allocate NVKMS memory for GEM object");
|
||||
goto nvkms_alloc_memory_failed;
|
||||
}
|
||||
|
||||
ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory,
|
||||
p->memory_size);
|
||||
if (ret) {
|
||||
goto nvkms_gem_obj_init_failed;
|
||||
}
|
||||
|
||||
return nv_drm_gem_handle_create_drop_reference(filep,
|
||||
&nv_nvkms_memory->base,
|
||||
&p->handle);
|
||||
|
||||
nvkms_gem_obj_init_failed:
|
||||
nvKms->freeMemory(nv_dev->pDevice, pMemory);
|
||||
|
||||
nvkms_alloc_memory_failed:
|
||||
nv_drm_free(nv_nvkms_memory);
|
||||
|
||||
failed:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup(
|
||||
struct drm_device *dev,
|
||||
const struct nv_drm_gem_object *nv_gem_src)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
const struct nv_drm_device *nv_dev_src;
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
|
||||
struct NvKmsKapiMemory *pMemory;
|
||||
|
||||
BUG_ON(nv_gem_src == NULL || nv_gem_src->ops != &nv_gem_nvkms_memory_ops);
|
||||
|
||||
nv_dev_src = to_nv_device(nv_gem_src->base.dev);
|
||||
|
||||
if ((nv_nvkms_memory =
|
||||
nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pMemory = nvKms->dupMemory(nv_dev->pDevice,
|
||||
nv_dev_src->pDevice, nv_gem_src->pMemory);
|
||||
if (pMemory == NULL) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to import NVKMS memory to GEM object");
|
||||
goto nvkms_dup_memory_failed;
|
||||
}
|
||||
|
||||
if (__nv_drm_nvkms_gem_obj_init(nv_dev,
|
||||
nv_nvkms_memory,
|
||||
pMemory,
|
||||
nv_gem_src->base.size)) {
|
||||
goto nvkms_gem_obj_init_failed;
|
||||
}
|
||||
|
||||
return &nv_nvkms_memory->base.base;
|
||||
|
||||
nvkms_gem_obj_init_failed:
|
||||
nvKms->freeMemory(nv_dev->pDevice, pMemory);
|
||||
|
||||
nvkms_dup_memory_failed:
|
||||
nv_drm_free(nv_nvkms_memory);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int nv_drm_dumb_map_offset(struct drm_file *file,
|
||||
struct drm_device *dev, uint32_t handle,
|
||||
uint64_t *offset)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(
|
||||
file,
|
||||
handle)) == NULL) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to lookup gem object for mapping: 0x%08x",
|
||||
handle);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = __nv_drm_gem_map_nvkms_memory_offset(nv_dev,
|
||||
&nv_nvkms_memory->base, offset);
|
||||
|
||||
nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
|
||||
int nv_drm_dumb_destroy(struct drm_file *file,
|
||||
struct drm_device *dev,
|
||||
uint32_t handle)
|
||||
{
|
||||
return drm_gem_handle_delete(file, handle);
|
||||
}
|
||||
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
|
||||
|
||||
#endif
|
||||
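nv_drm_dumb_create() above derives the pitch and allocation size from the requested width, height, bpp, the device's pitch alignment, and PAGE_SIZE. A worked example of that arithmetic; the concrete values (1920x1080, 32 bpp, 256-byte pitch alignment, 4 KiB pages) are assumptions for illustration only.

/*
 * Worked example of the dumb-buffer sizing above (illustrative values):
 *   width = 1920, height = 1080, bpp = 32, pitchAlignment = 256
 *
 *   bytes per pixel = (32 + 7) >> 3           = 4
 *   pitch           = roundup(1920 * 4, 256)  = 7680
 *   size            = 1080 * 7680             = 8294400
 *   size            = roundup(8294400, 4096)  = 8294400  (already page aligned)
 */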
118  kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h  Normal file
@@ -0,0 +1,118 @@
/*
|
||||
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__
|
||||
#define __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-gem.h"
|
||||
|
||||
struct nv_drm_gem_nvkms_memory {
|
||||
struct nv_drm_gem_object base;
|
||||
|
||||
/*
|
||||
* Lock to protect concurrent writes to physically_mapped, pPhysicalAddress,
|
||||
* and pWriteCombinedIORemapAddress.
|
||||
*
|
||||
* __nv_drm_gem_nvkms_map(), the sole writer, is structured such that
|
||||
* readers are not required to hold the lock.
|
||||
*/
|
||||
struct mutex map_lock;
|
||||
bool physically_mapped;
|
||||
void *pPhysicalAddress;
|
||||
void *pWriteCombinedIORemapAddress;
|
||||
|
||||
struct page **pages;
|
||||
unsigned long pages_count;
|
||||
};
|
||||
|
||||
extern const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops;
|
||||
|
||||
static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory(
|
||||
struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
if (nv_gem != NULL) {
|
||||
return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory_const(
|
||||
const struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
if (nv_gem != NULL) {
|
||||
return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct nv_drm_gem_nvkms_memory *nv_drm_gem_object_nvkms_memory_lookup(
|
||||
struct drm_file *filp,
|
||||
u32 handle)
|
||||
{
|
||||
struct nv_drm_gem_object *nv_gem =
|
||||
nv_drm_gem_object_lookup(filp, handle);
|
||||
|
||||
if (nv_gem != NULL && nv_gem->ops != &nv_gem_nvkms_memory_ops) {
|
||||
nv_drm_gem_object_unreference_unlocked(nv_gem);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return to_nv_nvkms_memory(nv_gem);
|
||||
}
|
||||
|
||||
int nv_drm_dumb_create(
|
||||
struct drm_file *file_priv,
|
||||
struct drm_device *dev, struct drm_mode_create_dumb *args);
|
||||
|
||||
int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
int nv_drm_dumb_map_offset(struct drm_file *file,
|
||||
struct drm_device *dev, uint32_t handle,
|
||||
uint64_t *offset);
|
||||
|
||||
#if defined(NV_DRM_DRIVER_HAS_DUMB_DESTROY)
|
||||
int nv_drm_dumb_destroy(struct drm_file *file,
|
||||
struct drm_device *dev,
|
||||
uint32_t handle);
|
||||
#endif /* NV_DRM_DRIVER_HAS_DUMB_DESTROY */
|
||||
|
||||
struct drm_gem_object *nv_drm_gem_nvkms_prime_import(
|
||||
struct drm_device *dev,
|
||||
struct drm_gem_object *gem);
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ */
|
||||
235  kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c  Normal file
@@ -0,0 +1,235 @@
/*
|
||||
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#include <drm/drm_prime.h>
|
||||
|
||||
#include "nvidia-drm-gem-user-memory.h"
|
||||
#include "nvidia-drm-helper.h"
|
||||
#include "nvidia-drm-ioctl.h"
|
||||
|
||||
#include "linux/dma-buf.h"
|
||||
#include "linux/mm.h"
|
||||
#include "nv-mm.h"
|
||||
#include "linux/pfn_t.h"
|
||||
|
||||
#if defined(NV_BSD)
|
||||
#include <vm/vm_pageout.h>
|
||||
#endif
|
||||
|
||||
static inline
|
||||
void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
|
||||
|
||||
nv_drm_unlock_user_pages(nv_user_memory->pages_count,
|
||||
nv_user_memory->pages);
|
||||
|
||||
nv_drm_free(nv_user_memory);
|
||||
}
|
||||
|
||||
static struct sg_table *__nv_drm_gem_user_memory_prime_get_sg_table(
|
||||
struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
|
||||
struct drm_gem_object *gem = &nv_gem->base;
|
||||
|
||||
return nv_drm_prime_pages_to_sg(gem->dev,
|
||||
nv_user_memory->pages,
|
||||
nv_user_memory->pages_count);
|
||||
}
|
||||
|
||||
static void *__nv_drm_gem_user_memory_prime_vmap(
|
||||
struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
|
||||
|
||||
return nv_drm_vmap(nv_user_memory->pages,
|
||||
nv_user_memory->pages_count,
|
||||
true);
|
||||
}
|
||||
|
||||
static void __nv_drm_gem_user_memory_prime_vunmap(
|
||||
struct nv_drm_gem_object *gem,
|
||||
void *address)
|
||||
{
|
||||
nv_drm_vunmap(address);
|
||||
}
|
||||
|
||||
static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
int ret = drm_gem_mmap_obj(&nv_gem->base,
|
||||
drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma);
|
||||
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enforce that user-memory GEM mappings are MAP_SHARED, to prevent COW
|
||||
* with MAP_PRIVATE and VM_MIXEDMAP
|
||||
*/
|
||||
if (!(vma->vm_flags & VM_SHARED)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
nv_vm_flags_clear(vma, VM_PFNMAP);
|
||||
nv_vm_flags_clear(vma, VM_IO);
|
||||
nv_vm_flags_set(vma, VM_MIXEDMAP);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if defined(NV_LINUX) && !defined(NV_VMF_INSERT_MIXED_PRESENT)
|
||||
static vm_fault_t __nv_vm_insert_mixed_helper(
|
||||
struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
unsigned long pfn)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = vm_insert_mixed(vma, address, pfn_to_pfn_t(pfn));
|
||||
|
||||
switch (ret) {
|
||||
case 0:
|
||||
case -EBUSY:
|
||||
/*
|
||||
* EBUSY indicates that another thread already handled
|
||||
* the faulted range.
|
||||
*/
|
||||
return VM_FAULT_NOPAGE;
|
||||
case -ENOMEM:
|
||||
return VM_FAULT_OOM;
|
||||
default:
|
||||
WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault(
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf)
|
||||
{
|
||||
struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
|
||||
unsigned long address = vmf->address;
|
||||
struct drm_gem_object *gem = vma->vm_private_data;
|
||||
unsigned long page_offset;
|
||||
unsigned long pfn;
|
||||
|
||||
page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node);
|
||||
BUG_ON(page_offset >= nv_user_memory->pages_count);
|
||||
pfn = page_to_pfn(nv_user_memory->pages[page_offset]);
|
||||
|
||||
#if !defined(NV_LINUX)
|
||||
return vmf_insert_pfn(vma, address, pfn);
|
||||
#elif defined(NV_VMF_INSERT_MIXED_PRESENT)
|
||||
return vmf_insert_mixed(vma, address, pfn_to_pfn_t(pfn));
|
||||
#else
|
||||
return __nv_vm_insert_mixed_helper(vma, address, pfn);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int __nv_drm_gem_user_create_mmap_offset(
|
||||
struct nv_drm_device *nv_dev,
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
uint64_t *offset)
|
||||
{
|
||||
(void)nv_dev;
|
||||
return nv_drm_gem_create_mmap_offset(nv_gem, offset);
|
||||
}
|
||||
|
||||
const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops = {
|
||||
.free = __nv_drm_gem_user_memory_free,
|
||||
.prime_get_sg_table = __nv_drm_gem_user_memory_prime_get_sg_table,
|
||||
.prime_vmap = __nv_drm_gem_user_memory_prime_vmap,
|
||||
.prime_vunmap = __nv_drm_gem_user_memory_prime_vunmap,
|
||||
.mmap = __nv_drm_gem_user_memory_mmap,
|
||||
.handle_vma_fault = __nv_drm_gem_user_memory_handle_vma_fault,
|
||||
.create_mmap_offset = __nv_drm_gem_user_create_mmap_offset,
|
||||
};
|
||||
|
||||
int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
|
||||
struct drm_nvidia_gem_import_userspace_memory_params *params = data;
|
||||
struct nv_drm_gem_user_memory *nv_user_memory;
|
||||
|
||||
struct page **pages = NULL;
|
||||
unsigned long pages_count = 0;
|
||||
|
||||
int ret = 0;
|
||||
|
||||
if ((params->size % PAGE_SIZE) != 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Userspace memory 0x%" NvU64_fmtx " size should be in a multiple of page "
|
||||
"size to create a gem object",
|
||||
params->address);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pages_count = params->size / PAGE_SIZE;
|
||||
|
||||
ret = nv_drm_lock_user_pages(params->address, pages_count, &pages);
|
||||
|
||||
if (ret != 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to lock user pages for address 0x%" NvU64_fmtx ": %d",
|
||||
params->address, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((nv_user_memory =
|
||||
nv_drm_calloc(1, sizeof(*nv_user_memory))) == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
nv_user_memory->pages = pages;
|
||||
nv_user_memory->pages_count = pages_count;
|
||||
|
||||
nv_drm_gem_object_init(nv_dev,
|
||||
&nv_user_memory->base,
|
||||
&__nv_gem_user_memory_ops,
|
||||
params->size,
|
||||
NULL /* pMemory */);
|
||||
|
||||
return nv_drm_gem_handle_create_drop_reference(filep,
|
||||
&nv_user_memory->base,
|
||||
¶ms->handle);
|
||||
|
||||
failed:
|
||||
nv_drm_unlock_user_pages(pages_count, pages);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
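The import ioctl above pins the user range with nv_drm_lock_user_pages() and the matching unpin happens in __nv_drm_gem_user_memory_free(). A minimal sketch of that pairing and of the page-alignment requirement; the wrapper function name is hypothetical.

/*
 * Sketch only: pin a page-aligned user range, and release it with
 * nv_drm_unlock_user_pages() when the owning GEM object is torn down.
 */
static int example_pin_user_range(NvU64 address, NvU64 size,
                                  struct page ***pages_out,
                                  unsigned long *pages_count_out)
{
    int ret;

    if ((size % PAGE_SIZE) != 0) {
        return -EINVAL;                 /* partial pages are rejected */
    }

    *pages_count_out = size / PAGE_SIZE;

    ret = nv_drm_lock_user_pages(address, *pages_count_out, pages_out);
    if (ret != 0) {
        return ret;
    }

    /* ... hand the pages to a GEM object; on teardown, undo the pin with: */
    /* nv_drm_unlock_user_pages(*pages_count_out, *pages_out); */

    return 0;
}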
71  kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h  Normal file
@@ -0,0 +1,71 @@
/*
|
||||
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_GEM_USER_MEMORY_H__
|
||||
#define __NVIDIA_DRM_GEM_USER_MEMORY_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-gem.h"
|
||||
|
||||
struct nv_drm_gem_user_memory {
|
||||
struct nv_drm_gem_object base;
|
||||
struct page **pages;
|
||||
unsigned long pages_count;
|
||||
};
|
||||
|
||||
extern const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops;
|
||||
|
||||
static inline struct nv_drm_gem_user_memory *to_nv_user_memory(
|
||||
struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
if (nv_gem != NULL) {
|
||||
return container_of(nv_gem, struct nv_drm_gem_user_memory, base);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
static inline
|
||||
struct nv_drm_gem_user_memory *nv_drm_gem_object_user_memory_lookup(
|
||||
struct drm_file *filp,
|
||||
u32 handle)
|
||||
{
|
||||
struct nv_drm_gem_object *nv_gem =
|
||||
nv_drm_gem_object_lookup(filp, handle);
|
||||
|
||||
if (nv_gem != NULL && nv_gem->ops != &__nv_gem_user_memory_ops) {
|
||||
nv_drm_gem_object_unreference_unlocked(nv_gem);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return to_nv_user_memory(nv_gem);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __NVIDIA_DRM_GEM_USER_MEMORY_H__ */
|
||||
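The ops-pointer comparison in nv_drm_gem_object_user_memory_lookup() is what
makes the lookup type-safe: a handle that names any other object type is
unreferenced and NULL is returned. A hypothetical ioctl handler that only
accepts user-memory objects could use it as sketched below; the handler and
struct nv_drm_example_params are placeholders, not part of the driver.

static int nv_drm_example_user_memory_ioctl(struct drm_device *dev,
                                            void *data,
                                            struct drm_file *filep)
{
    struct nv_drm_example_params *params = data;   /* hypothetical */
    struct nv_drm_gem_user_memory *nv_user_memory;

    nv_user_memory =
        nv_drm_gem_object_user_memory_lookup(filep, params->handle);

    if (nv_user_memory == NULL) {
        /* Either the handle is invalid or it names a different object type. */
        return -EINVAL;
    }

    /* ... operate on nv_user_memory->pages / pages_count ... */

    /* The lookup took a reference; drop it when done. */
    nv_drm_gem_object_unreference_unlocked(&nv_user_memory->base);

    return 0;
}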
kernel-open/nvidia-drm/nvidia-drm-gem.c (new file, 399 lines)
@@ -0,0 +1,399 @@
/*
|
||||
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-priv.h"
|
||||
#include "nvidia-drm-ioctl.h"
|
||||
#include "nvidia-drm-fence.h"
|
||||
#include "nvidia-drm-gem.h"
|
||||
#include "nvidia-drm-gem-nvkms-memory.h"
|
||||
#include "nvidia-drm-gem-user-memory.h"
|
||||
#include "nvidia-dma-resv-helper.h"
|
||||
#include "nvidia-drm-helper.h"
|
||||
#include "nvidia-drm-gem-dma-buf.h"
|
||||
#include "nvidia-drm-gem-nvkms-memory.h"
|
||||
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_prime.h>
|
||||
#include <drm/drm_file.h>
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
#include <drm/drm_vma_manager.h>
|
||||
#endif
|
||||
|
||||
#include "linux/dma-buf.h"
|
||||
|
||||
#include "nv-mm.h"
|
||||
|
||||
void nv_drm_gem_free(struct drm_gem_object *gem)
|
||||
{
|
||||
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
|
||||
|
||||
/* Cleanup core gem object */
|
||||
drm_gem_object_release(&nv_gem->base);
|
||||
|
||||
#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
|
||||
nv_dma_resv_fini(&nv_gem->resv);
|
||||
#endif
|
||||
|
||||
nv_gem->ops->free(nv_gem);
|
||||
}
|
||||
|
||||
#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) && \
|
||||
defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
|
||||
|
||||
/*
|
||||
* The 'dma_buf_map' structure is renamed to 'iosys_map' by the commit
|
||||
* 7938f4218168 ("dma-buf-map: Rename to iosys-map").
|
||||
*/
|
||||
#if defined(NV_LINUX_IOSYS_MAP_H_PRESENT)
|
||||
typedef struct iosys_map nv_sysio_map_t;
|
||||
#else
|
||||
typedef struct dma_buf_map nv_sysio_map_t;
|
||||
#endif
|
||||
|
||||
static int nv_drm_gem_vmap(struct drm_gem_object *gem,
|
||||
nv_sysio_map_t *map)
|
||||
{
|
||||
void *vaddr = nv_drm_gem_prime_vmap(gem);
|
||||
if (vaddr == NULL) {
|
||||
return -ENOMEM;
|
||||
} else if (IS_ERR(vaddr)) {
|
||||
return PTR_ERR(vaddr);
|
||||
}
|
||||
map->vaddr = vaddr;
|
||||
map->is_iomem = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void nv_drm_gem_vunmap(struct drm_gem_object *gem,
|
||||
nv_sysio_map_t *map)
|
||||
{
|
||||
nv_drm_gem_prime_vunmap(gem, map->vaddr);
|
||||
map->vaddr = NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) || \
|
||||
!defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
|
||||
static struct drm_gem_object_funcs nv_drm_gem_funcs = {
|
||||
.free = nv_drm_gem_free,
|
||||
.get_sg_table = nv_drm_gem_prime_get_sg_table,
|
||||
|
||||
#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS)
|
||||
.export = drm_gem_prime_export,
|
||||
#if defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG)
|
||||
.vmap = nv_drm_gem_vmap,
|
||||
.vunmap = nv_drm_gem_vunmap,
|
||||
#else
|
||||
.vmap = nv_drm_gem_prime_vmap,
|
||||
.vunmap = nv_drm_gem_prime_vunmap,
|
||||
#endif
|
||||
.vm_ops = &nv_drm_gem_vma_ops,
|
||||
#endif
|
||||
};
|
||||
#endif
|
||||
|
||||
void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
const struct nv_drm_gem_object_funcs * const ops,
|
||||
size_t size,
|
||||
struct NvKmsKapiMemory *pMemory)
|
||||
{
|
||||
struct drm_device *dev = nv_dev->dev;
|
||||
|
||||
nv_gem->nv_dev = nv_dev;
|
||||
nv_gem->ops = ops;
|
||||
|
||||
nv_gem->pMemory = pMemory;
|
||||
|
||||
/* Initialize the gem object */
|
||||
|
||||
#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
|
||||
nv_dma_resv_init(&nv_gem->resv);
|
||||
#endif
|
||||
|
||||
#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT)
|
||||
nv_gem->base.funcs = &nv_drm_gem_funcs;
|
||||
#endif
|
||||
|
||||
drm_gem_private_object_init(dev, &nv_gem->base, size);
|
||||
|
||||
/* Create mmap offset early for drm_gem_prime_mmap(), if possible. */
|
||||
if (nv_gem->ops->create_mmap_offset) {
|
||||
uint64_t offset;
|
||||
nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, &offset);
|
||||
}
|
||||
}
|
||||
|
||||
struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
|
||||
struct dma_buf *dma_buf)
|
||||
{
|
||||
struct drm_gem_object *gem_dst;
|
||||
struct nv_drm_gem_object *nv_gem_src;
|
||||
|
||||
if (dma_buf->owner == dev->driver->fops->owner) {
|
||||
nv_gem_src = to_nv_gem_object(dma_buf->priv);
|
||||
|
||||
if (nv_gem_src->base.dev != dev &&
|
||||
nv_gem_src->ops->prime_dup != NULL) {
|
||||
/*
|
||||
* If we're importing from another NV device, try to handle the
|
||||
* import internally rather than attaching through the dma-buf
|
||||
* mechanisms. Importing from the same device is even easier,
|
||||
* and drm_gem_prime_import() handles that just fine.
|
||||
*/
|
||||
gem_dst = nv_gem_src->ops->prime_dup(dev, nv_gem_src);
|
||||
|
||||
if (gem_dst == NULL) {
|
||||
return ERR_PTR(-ENOTSUPP);
|
||||
}
|
||||
|
||||
return gem_dst;
|
||||
}
|
||||
}
|
||||
|
||||
return drm_gem_prime_import(dev, dma_buf);
|
||||
}
|
||||
|
||||
struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem)
|
||||
{
|
||||
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
|
||||
|
||||
if (nv_gem->ops->prime_get_sg_table != NULL) {
|
||||
return nv_gem->ops->prime_get_sg_table(nv_gem);
|
||||
}
|
||||
|
||||
return ERR_PTR(-ENOTSUPP);
|
||||
}
|
||||
|
||||
void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem)
|
||||
{
|
||||
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
|
||||
|
||||
if (nv_gem->ops->prime_vmap != NULL) {
|
||||
return nv_gem->ops->prime_vmap(nv_gem);
|
||||
}
|
||||
|
||||
return ERR_PTR(-ENOTSUPP);
|
||||
}
|
||||
|
||||
void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address)
|
||||
{
|
||||
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
|
||||
|
||||
if (nv_gem->ops->prime_vunmap != NULL) {
|
||||
nv_gem->ops->prime_vunmap(nv_gem, address);
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
|
||||
nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj)
|
||||
{
|
||||
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(obj);
|
||||
return nv_drm_gem_res_obj(nv_gem);
|
||||
}
|
||||
#endif
|
||||
|
||||
int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct drm_nvidia_gem_map_offset_params *params = data;
|
||||
struct nv_drm_gem_object *nv_gem;
|
||||
int ret;
|
||||
|
||||
if ((nv_gem = nv_drm_gem_object_lookup(filep,
|
||||
params->handle)) == NULL) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to lookup gem object for map: 0x%08x",
|
||||
params->handle);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* mmap offset creation is idempotent, fetch it by creating it again. */
|
||||
if (nv_gem->ops->create_mmap_offset) {
|
||||
ret = nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, ¶ms->offset);
|
||||
} else {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Gem object type does not support mapping: 0x%08x",
|
||||
params->handle);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
nv_drm_gem_object_unreference_unlocked(nv_gem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
int nv_drm_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
{
|
||||
struct drm_file *priv = file->private_data;
|
||||
struct drm_device *dev = priv->minor->dev;
|
||||
struct drm_gem_object *obj = NULL;
|
||||
struct drm_vma_offset_node *node;
|
||||
int ret = 0;
|
||||
struct nv_drm_gem_object *nv_gem;
|
||||
|
||||
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
|
||||
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
|
||||
vma->vm_pgoff, vma_pages(vma));
|
||||
if (likely(node)) {
|
||||
obj = container_of(node, struct drm_gem_object, vma_node);
|
||||
/*
|
||||
* When the object is being freed, after it hits 0-refcnt it proceeds
|
||||
* to tear down the object. In the process it will attempt to remove
|
||||
* the VMA offset and so acquire this mgr->vm_lock. Therefore if we
|
||||
* find an object with a 0-refcnt that matches our range, we know it is
|
||||
* in the process of being destroyed and will be freed as soon as we
|
||||
* release the lock - so we have to check for the 0-refcnted object and
|
||||
* treat it as invalid.
|
||||
*/
|
||||
if (!kref_get_unless_zero(&obj->refcount))
|
||||
obj = NULL;
|
||||
}
|
||||
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
|
||||
|
||||
if (!obj)
|
||||
return -EINVAL;
|
||||
|
||||
nv_gem = to_nv_gem_object(obj);
|
||||
if (nv_gem->ops->mmap == NULL) {
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!drm_vma_node_is_allowed(node, file->private_data)) {
|
||||
ret = -EACCES;
|
||||
goto done;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_VMA_OFFSET_NODE_HAS_READONLY)
|
||||
if (node->readonly) {
|
||||
if (vma->vm_flags & VM_WRITE) {
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
nv_vm_flags_clear(vma, VM_MAYWRITE);
|
||||
}
|
||||
#endif
|
||||
|
||||
ret = nv_gem->ops->mmap(nv_gem, vma);
|
||||
|
||||
done:
|
||||
nv_drm_gem_object_unreference_unlocked(nv_gem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep)
|
||||
{
|
||||
struct drm_nvidia_gem_identify_object_params *p = data;
|
||||
struct nv_drm_gem_dma_buf *nv_dma_buf;
|
||||
struct nv_drm_gem_nvkms_memory *nv_nvkms_memory;
|
||||
struct nv_drm_gem_user_memory *nv_user_memory;
|
||||
struct nv_drm_gem_object *nv_gem = NULL;
|
||||
|
||||
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(filep, p->handle);
|
||||
if (nv_dma_buf) {
|
||||
p->object_type = NV_GEM_OBJECT_DMABUF;
|
||||
nv_gem = &nv_dma_buf->base;
|
||||
goto done;
|
||||
}
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(filep, p->handle);
|
||||
if (nv_nvkms_memory) {
|
||||
p->object_type = NV_GEM_OBJECT_NVKMS;
|
||||
nv_gem = &nv_nvkms_memory->base;
|
||||
goto done;
|
||||
}
|
||||
#endif
|
||||
|
||||
nv_user_memory = nv_drm_gem_object_user_memory_lookup(filep, p->handle);
|
||||
if (nv_user_memory) {
|
||||
p->object_type = NV_GEM_OBJECT_USERMEMORY;
|
||||
nv_gem = &nv_user_memory->base;
|
||||
goto done;
|
||||
}
|
||||
|
||||
p->object_type = NV_GEM_OBJECT_UNKNOWN;
|
||||
|
||||
done:
|
||||
if (nv_gem) {
|
||||
nv_drm_gem_object_unreference_unlocked(nv_gem);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* XXX Move these vma operations to os layer */
|
||||
|
||||
static vm_fault_t __nv_drm_vma_fault(struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf)
|
||||
{
|
||||
struct drm_gem_object *gem = vma->vm_private_data;
|
||||
struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem);
|
||||
|
||||
if (!nv_gem) {
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
return nv_gem->ops->handle_vma_fault(nv_gem, vma, vmf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Note that nv_drm_vma_fault() can be called for different or same
|
||||
* ranges of the same drm_gem_object simultaneously.
|
||||
*/
|
||||
|
||||
#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG)
|
||||
static vm_fault_t nv_drm_vma_fault(struct vm_fault *vmf)
|
||||
{
|
||||
return __nv_drm_vma_fault(vmf->vma, vmf);
|
||||
}
|
||||
#else
|
||||
static vm_fault_t nv_drm_vma_fault(struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf)
|
||||
{
|
||||
return __nv_drm_vma_fault(vma, vmf);
|
||||
}
|
||||
#endif
|
||||
|
||||
const struct vm_operations_struct nv_drm_gem_vma_ops = {
|
||||
.open = drm_gem_vm_open,
|
||||
.fault = nv_drm_vma_fault,
|
||||
.close = drm_gem_vm_close,
|
||||
};
|
||||
|
||||
#endif /* NV_DRM_AVAILABLE */
kernel-open/nvidia-drm/nvidia-drm-gem.h (new file, 187 lines)
@@ -0,0 +1,187 @@
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_GEM_H__
|
||||
#define __NVIDIA_DRM_GEM_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-priv.h"
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm_gem.h>
|
||||
|
||||
#include "nvkms-kapi.h"
|
||||
#include "nv-mm.h"
|
||||
|
||||
#include "nvidia-dma-resv-helper.h"
|
||||
|
||||
#include "linux/dma-buf.h"
|
||||
|
||||
struct nv_drm_gem_object;
|
||||
|
||||
struct nv_drm_gem_object_funcs {
|
||||
void (*free)(struct nv_drm_gem_object *nv_gem);
|
||||
struct sg_table *(*prime_get_sg_table)(struct nv_drm_gem_object *nv_gem);
|
||||
void *(*prime_vmap)(struct nv_drm_gem_object *nv_gem);
|
||||
void (*prime_vunmap)(struct nv_drm_gem_object *nv_gem, void *address);
|
||||
struct drm_gem_object *(*prime_dup)(struct drm_device *dev,
|
||||
const struct nv_drm_gem_object *nv_gem_src);
|
||||
int (*mmap)(struct nv_drm_gem_object *nv_gem, struct vm_area_struct *vma);
|
||||
vm_fault_t (*handle_vma_fault)(struct nv_drm_gem_object *nv_gem,
|
||||
struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf);
|
||||
int (*create_mmap_offset)(struct nv_drm_device *nv_dev,
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
uint64_t *offset);
|
||||
};
|
||||
|
||||
struct nv_drm_gem_object {
|
||||
struct drm_gem_object base;
|
||||
|
||||
struct nv_drm_device *nv_dev;
|
||||
const struct nv_drm_gem_object_funcs *ops;
|
||||
|
||||
struct NvKmsKapiMemory *pMemory;
|
||||
|
||||
#if !defined(NV_DRM_GEM_OBJECT_HAS_RESV)
|
||||
nv_dma_resv_t resv;
|
||||
#endif
|
||||
};
|
||||
|
||||
static inline struct nv_drm_gem_object *to_nv_gem_object(
|
||||
struct drm_gem_object *gem)
|
||||
{
|
||||
if (gem != NULL) {
|
||||
return container_of(gem, struct nv_drm_gem_object, base);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void
|
||||
nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT)
|
||||
drm_gem_object_put_unlocked(&nv_gem->base);
|
||||
#else
|
||||
drm_gem_object_put(&nv_gem->base);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int nv_drm_gem_handle_create_drop_reference(
|
||||
struct drm_file *file_priv,
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
uint32_t *handle)
|
||||
{
|
||||
int ret = drm_gem_handle_create(file_priv, &nv_gem->base, handle);
|
||||
|
||||
/* drop reference from allocate - handle holds it now */
|
||||
|
||||
nv_drm_gem_object_unreference_unlocked(nv_gem);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int nv_drm_gem_create_mmap_offset(
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
uint64_t *offset)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if ((ret = drm_gem_create_mmap_offset(&nv_gem->base)) < 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_gem->nv_dev,
|
||||
"drm_gem_create_mmap_offset failed with error code %d",
|
||||
ret);
|
||||
goto done;
|
||||
}
|
||||
|
||||
*offset = drm_vma_node_offset_addr(&nv_gem->base.vma_node);
|
||||
|
||||
done:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void nv_drm_gem_free(struct drm_gem_object *gem);
|
||||
|
||||
static inline struct nv_drm_gem_object *nv_drm_gem_object_lookup(
|
||||
struct drm_file *filp,
|
||||
u32 handle)
|
||||
{
|
||||
return to_nv_gem_object(drm_gem_object_lookup(filp, handle));
|
||||
}
|
||||
|
||||
static inline int nv_drm_gem_handle_create(struct drm_file *filp,
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
uint32_t *handle)
|
||||
{
|
||||
return drm_gem_handle_create(filp, &nv_gem->base, handle);
|
||||
}
|
||||
|
||||
static inline nv_dma_resv_t *nv_drm_gem_res_obj(struct nv_drm_gem_object *nv_gem)
|
||||
{
|
||||
#if defined(NV_DRM_GEM_OBJECT_HAS_RESV)
|
||||
return nv_gem->base.resv;
|
||||
#else
|
||||
return nv_gem->base.dma_buf ? nv_gem->base.dma_buf->resv : &nv_gem->resv;
|
||||
#endif
|
||||
}
|
||||
|
||||
void nv_drm_gem_object_init(struct nv_drm_device *nv_dev,
|
||||
struct nv_drm_gem_object *nv_gem,
|
||||
const struct nv_drm_gem_object_funcs * const ops,
|
||||
size_t size,
|
||||
struct NvKmsKapiMemory *pMemory);
|
||||
|
||||
struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev,
|
||||
struct dma_buf *dma_buf);
|
||||
|
||||
struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem);
|
||||
|
||||
void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem);
|
||||
|
||||
void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address);
|
||||
|
||||
#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
|
||||
nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj);
|
||||
#endif
|
||||
|
||||
extern const struct vm_operations_struct nv_drm_gem_vma_ops;
|
||||
|
||||
int nv_drm_gem_map_offset_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
int nv_drm_mmap(struct file *file, struct vm_area_struct *vma);
|
||||
|
||||
int nv_drm_gem_identify_object_ioctl(struct drm_device *dev,
|
||||
void *data, struct drm_file *filep);
|
||||
|
||||
#endif /* NV_DRM_AVAILABLE */
|
||||
|
||||
#endif /* __NVIDIA_DRM_GEM_H__ */
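To tie the pieces of this header together: each GEM backend supplies a
struct nv_drm_gem_object_funcs table and passes it to nv_drm_gem_object_init().
The sketch below is purely illustrative (nv_example_gem and its ops table are
made-up names; the real tables live in the nvidia-drm-gem-*.c files): .free is
mandatory, the prime_*, mmap and create_mmap_offset callbacks are optional and
are NULL-checked in nvidia-drm-gem.c, and handle_vma_fault is only needed when
mmap is provided.

struct nv_example_gem {                 /* hypothetical backend object */
    struct nv_drm_gem_object base;
    /* backend-specific state ... */
};

static void __nv_example_gem_free(struct nv_drm_gem_object *nv_gem)
{
    struct nv_example_gem *obj =
        container_of(nv_gem, struct nv_example_gem, base);

    /* Release backend state and 'obj' with whatever allocator created them. */
    (void)obj;
}

static const struct nv_drm_gem_object_funcs __nv_example_gem_ops = {
    .free = __nv_example_gem_free,
    /* Remaining callbacks left NULL: the common code checks prime_*, mmap
     * and create_mmap_offset for NULL before calling them. */
};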
kernel-open/nvidia-drm/nvidia-drm-helper.c (new file, 181 lines)
@@ -0,0 +1,181 @@
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file contains snapshots of DRM helper functions from the
|
||||
* Linux kernel which are used by nvidia-drm.ko if the target kernel
|
||||
* predates the helper function. Having these functions consistently
|
||||
* present simplifies nvidia-drm.ko source.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-helper.h"
|
||||
#include "nvidia-drm-priv.h"
|
||||
#include "nvidia-drm-crtc.h"
|
||||
|
||||
#include "nvmisc.h"
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT)
|
||||
#include <drm/drm_atomic_uapi.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm_framebuffer.h>
|
||||
|
||||
/*
|
||||
* drm_atomic_helper_disable_all() has been added by commit
|
||||
* 1494276000db789c6d2acd85747be4707051c801, which is Signed-off-by:
|
||||
* Thierry Reding <treding@nvidia.com>
|
||||
* Daniel Vetter <daniel.vetter@ffwll.ch>
|
||||
*
|
||||
* drm_atomic_helper_disable_all() is copied from
|
||||
* linux/drivers/gpu/drm/drm_atomic_helper.c and modified to use
|
||||
* nv_drm_for_each_crtc instead of drm_for_each_crtc to loop over all crtcs,
|
||||
* use nv_drm_for_each_*_in_state instead of for_each_connector_in_state to loop
|
||||
* over all modeset object states, and use drm_atomic_state_free() if
|
||||
* drm_atomic_state_put() is not available.
|
||||
*
|
||||
* drm_atomic_helper_disable_all() is copied from
|
||||
* linux/drivers/gpu/drm/drm_atomic_helper.c @
|
||||
* 49d70aeaeca8f62b72b7712ecd1e29619a445866, which has the following
|
||||
* copyright and license information:
|
||||
*
|
||||
* Copyright (C) 2014 Red Hat
|
||||
* Copyright (C) 2014 Intel Corp.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rob Clark <robdclark@gmail.com>
|
||||
* Daniel Vetter <daniel.vetter@ffwll.ch>
|
||||
*/
|
||||
int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
|
||||
struct drm_modeset_acquire_ctx *ctx)
|
||||
{
|
||||
struct drm_atomic_state *state;
|
||||
struct drm_connector_state *conn_state;
|
||||
struct drm_connector *conn;
|
||||
struct drm_plane_state *plane_state;
|
||||
struct drm_plane *plane;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
struct drm_crtc *crtc;
|
||||
unsigned plane_mask = 0;
|
||||
int ret, i;
|
||||
|
||||
state = drm_atomic_state_alloc(dev);
|
||||
if (!state)
|
||||
return -ENOMEM;
|
||||
|
||||
state->acquire_ctx = ctx;
|
||||
|
||||
nv_drm_for_each_crtc(crtc, dev) {
|
||||
crtc_state = drm_atomic_get_crtc_state(state, crtc);
|
||||
if (IS_ERR(crtc_state)) {
|
||||
ret = PTR_ERR(crtc_state);
|
||||
goto free;
|
||||
}
|
||||
|
||||
crtc_state->active = false;
|
||||
|
||||
ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL);
|
||||
if (ret < 0)
|
||||
goto free;
|
||||
|
||||
ret = drm_atomic_add_affected_planes(state, crtc);
|
||||
if (ret < 0)
|
||||
goto free;
|
||||
|
||||
ret = drm_atomic_add_affected_connectors(state, crtc);
|
||||
if (ret < 0)
|
||||
goto free;
|
||||
}
|
||||
|
||||
nv_drm_for_each_plane(plane, dev) {
|
||||
plane_state = drm_atomic_get_plane_state(state, plane);
|
||||
if (IS_ERR(plane_state)) {
|
||||
ret = PTR_ERR(plane_state);
|
||||
goto free;
|
||||
}
|
||||
|
||||
plane_state->rotation = DRM_MODE_ROTATE_0;
|
||||
}
|
||||
|
||||
nv_drm_for_each_connector_in_state(state, conn, conn_state, i) {
|
||||
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
|
||||
if (ret < 0)
|
||||
goto free;
|
||||
}
|
||||
|
||||
nv_drm_for_each_plane_in_state(state, plane, plane_state, i) {
|
||||
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
|
||||
if (ret < 0)
|
||||
goto free;
|
||||
|
||||
drm_atomic_set_fb_for_plane(plane_state, NULL);
|
||||
plane_mask |= NVBIT(drm_plane_index(plane));
|
||||
plane->old_fb = plane->fb;
|
||||
}
|
||||
|
||||
ret = drm_atomic_commit(state);
|
||||
free:
|
||||
if (plane_mask) {
|
||||
drm_for_each_plane_mask(plane, dev, plane_mask) {
|
||||
if (ret == 0) {
|
||||
plane->fb = NULL;
|
||||
plane->crtc = NULL;
|
||||
|
||||
WARN_ON(plane->state->fb);
|
||||
WARN_ON(plane->state->crtc);
|
||||
|
||||
if (plane->old_fb)
|
||||
drm_framebuffer_put(plane->old_fb);
|
||||
}
|
||||
plane->old_fb = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
drm_atomic_state_put(state);
|
||||
|
||||
return ret;
|
||||
}
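/*
 * Illustrative sketch (not part of this file): the helper above expects the
 * caller to hold the modeset locks through an acquire context and to handle
 * -EDEADLK backoff, along the lines of the standard DRM pattern below.
 * "example_disable_outputs" is a made-up caller, not driver code.
 */
static int example_disable_outputs(struct drm_device *dev)
{
    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);

retry:
    ret = drm_modeset_lock_all_ctx(dev, &ctx);
    if (ret == 0) {
        ret = nv_drm_atomic_helper_disable_all(dev, &ctx);
    }

    if (ret == -EDEADLK) {
        /* Another thread won the lock ordering; back off and retry. */
        drm_modeset_backoff(&ctx);
        goto retry;
    }

    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);
    return ret;
}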
|
||||
|
||||
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
kernel-open/nvidia-drm/nvidia-drm-helper.h (new file, 476 lines)
@@ -0,0 +1,476 @@
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_HELPER_H__
|
||||
#define __NVIDIA_DRM_HELPER_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm_drv.h>
|
||||
|
||||
#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE)
|
||||
#include <drm/drm_blend.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* For DRM_MODE_ROTATE_*, DRM_MODE_REFLECT_*, struct drm_color_ctm_3x4, and
|
||||
* struct drm_color_lut.
|
||||
*/
|
||||
#include <uapi/drm/drm_mode.h>
|
||||
|
||||
/*
|
||||
* Commit 1e13c5644c44 ("drm/drm_mode_object: increase max objects to
|
||||
* accommodate new color props") in Linux v6.8 increased the per-object
|
||||
* property limit from 24 to 64.
|
||||
*/
|
||||
#define NV_DRM_USE_EXTENDED_PROPERTIES (DRM_OBJECT_MAX_PROPERTY >= 64)
|
||||
|
||||
#include <drm/drm_prime.h>
|
||||
|
||||
static inline struct sg_table*
|
||||
nv_drm_prime_pages_to_sg(struct drm_device *dev,
|
||||
struct page **pages, unsigned int nr_pages)
|
||||
{
|
||||
#if defined(NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG)
|
||||
return drm_prime_pages_to_sg(dev, pages, nr_pages);
|
||||
#else
|
||||
return drm_prime_pages_to_sg(pages, nr_pages);
|
||||
#endif
|
||||
}
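/*
 * Illustrative sketch (not part of this file): a backend's
 * prime_get_sg_table() callback would typically feed its pinned page array
 * through the wrapper above, which hides the drm_device argument added in
 * newer kernels. "nv_example_gem", its 'pages' and 'nr_pages' fields are
 * placeholders.
 */
static struct sg_table *__nv_example_prime_get_sg_table(
    struct nv_drm_gem_object *nv_gem)
{
    struct nv_example_gem *obj =
        container_of(nv_gem, struct nv_example_gem, base);

    return nv_drm_prime_pages_to_sg(nv_gem->nv_dev->dev,
                                    obj->pages, obj->nr_pages);
}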
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
/*
|
||||
* drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(),
|
||||
* drm_for_each_encoder and drm_for_each_plane() were added by kernel
|
||||
* commit 6295d607ad34ee4e43aab3f20714c2ef7a6adea1 which was
|
||||
* Signed-off-by:
|
||||
* Daniel Vetter <daniel.vetter@intel.com>
|
||||
* drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(),
|
||||
* drm_for_each_encoder and drm_for_each_plane() are copied from
|
||||
* include/drm/drm_crtc @
|
||||
* 6295d607ad34ee4e43aab3f20714c2ef7a6adea1
|
||||
* which has the following copyright and license information:
|
||||
*
|
||||
* Copyright © 2006 Keith Packard
|
||||
* Copyright © 2007-2008 Dave Airlie
|
||||
* Copyright © 2007-2008 Intel Corporation
|
||||
* Jesse Barnes <jesse.barnes@intel.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#include <drm/drm_crtc.h>
|
||||
|
||||
#if defined(drm_for_each_plane)
|
||||
#define nv_drm_for_each_plane(plane, dev) \
|
||||
drm_for_each_plane(plane, dev)
|
||||
#else
|
||||
#define nv_drm_for_each_plane(plane, dev) \
|
||||
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
|
||||
#endif
|
||||
|
||||
#if defined(drm_for_each_crtc)
|
||||
#define nv_drm_for_each_crtc(crtc, dev) \
|
||||
drm_for_each_crtc(crtc, dev)
|
||||
#else
|
||||
#define nv_drm_for_each_crtc(crtc, dev) \
|
||||
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
|
||||
#endif
|
||||
|
||||
#if defined(drm_for_each_encoder)
|
||||
#define nv_drm_for_each_encoder(encoder, dev) \
|
||||
drm_for_each_encoder(encoder, dev)
|
||||
#else
|
||||
#define nv_drm_for_each_encoder(encoder, dev) \
|
||||
list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
|
||||
#endif
|
||||
|
||||
#if defined(drm_for_each_fb)
|
||||
#define nv_drm_for_each_fb(fb, dev) \
|
||||
drm_for_each_fb(fb, dev)
|
||||
#else
|
||||
#define nv_drm_for_each_fb(fb, dev) \
|
||||
list_for_each_entry(fb, &(dev)->mode_config.fb_list, head)
|
||||
#endif
|
||||
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
|
||||
int nv_drm_atomic_helper_disable_all(struct drm_device *dev,
|
||||
struct drm_modeset_acquire_ctx *ctx);
|
||||
|
||||
/*
|
||||
* for_each_connector_in_state(), for_each_crtc_in_state() and
|
||||
* for_each_plane_in_state() were added by kernel commit
|
||||
* df63b9994eaf942afcdb946d27a28661d7dfbf2a which was Signed-off-by:
|
||||
* Ander Conselvan de Oliveira <ander.conselvan.de.oliveira@intel.com>
|
||||
* Daniel Vetter <daniel.vetter@ffwll.ch>
|
||||
*
|
||||
* for_each_connector_in_state(), for_each_crtc_in_state() and
|
||||
* for_each_plane_in_state() were copied from
|
||||
* include/drm/drm_atomic.h @
|
||||
* 21a01abbe32a3cbeb903378a24e504bfd9fe0648
|
||||
* which has the following copyright and license information:
|
||||
*
|
||||
* Copyright (C) 2014 Red Hat
|
||||
* Copyright (C) 2014 Intel Corp.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rob Clark <robdclark@gmail.com>
|
||||
* Daniel Vetter <daniel.vetter@ffwll.ch>
|
||||
*/
|
||||
|
||||
/**
|
||||
* nv_drm_for_each_connector_in_state - iterate over all connectors in an
|
||||
* atomic update
|
||||
* @__state: &struct drm_atomic_state pointer
|
||||
* @connector: &struct drm_connector iteration cursor
|
||||
* @connector_state: &struct drm_connector_state iteration cursor
|
||||
* @__i: int iteration cursor, for macro-internal use
|
||||
*
|
||||
* This iterates over all connectors in an atomic update. Note that before the
|
||||
* software state is committed (by calling drm_atomic_helper_swap_state()), this
|
||||
* points to the new state, while afterwards it points to the old state. Due to
|
||||
* this tricky confusion this macro is deprecated.
|
||||
*/
|
||||
#if !defined(for_each_connector_in_state)
|
||||
#define nv_drm_for_each_connector_in_state(__state, \
|
||||
connector, connector_state, __i) \
|
||||
for ((__i) = 0; \
|
||||
(__i) < (__state)->num_connector && \
|
||||
((connector) = (__state)->connectors[__i].ptr, \
|
||||
(connector_state) = (__state)->connectors[__i].state, 1); \
|
||||
(__i)++) \
|
||||
for_each_if (connector)
|
||||
#else
|
||||
#define nv_drm_for_each_connector_in_state(__state, \
|
||||
connector, connector_state, __i) \
|
||||
for_each_connector_in_state(__state, connector, connector_state, __i)
|
||||
#endif
|
||||
|
||||
|
||||
/**
|
||||
* nv_drm_for_each_crtc_in_state - iterate over all CRTCs in an atomic update
|
||||
* @__state: &struct drm_atomic_state pointer
|
||||
* @crtc: &struct drm_crtc iteration cursor
|
||||
* @crtc_state: &struct drm_crtc_state iteration cursor
|
||||
* @__i: int iteration cursor, for macro-internal use
|
||||
*
|
||||
* This iterates over all CRTCs in an atomic update. Note that before the
|
||||
* software state is committed (by calling drm_atomic_helper_swap_state()), this
|
||||
* points to the new state, while afterwards it points to the old state. Due to
|
||||
* this tricky confusion this macro is deprecated.
|
||||
*/
|
||||
#if !defined(for_each_crtc_in_state)
|
||||
#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
|
||||
for ((__i) = 0; \
|
||||
(__i) < (__state)->dev->mode_config.num_crtc && \
|
||||
((crtc) = (__state)->crtcs[__i].ptr, \
|
||||
(crtc_state) = (__state)->crtcs[__i].state, 1); \
|
||||
(__i)++) \
|
||||
for_each_if (crtc_state)
|
||||
#else
|
||||
#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
|
||||
for_each_crtc_in_state(__state, crtc, crtc_state, __i)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* nv_drm_for_each_plane_in_state - iterate over all planes in an atomic update
|
||||
* @__state: &struct drm_atomic_state pointer
|
||||
* @plane: &struct drm_plane iteration cursor
|
||||
* @plane_state: &struct drm_plane_state iteration cursor
|
||||
* @__i: int iteration cursor, for macro-internal use
|
||||
*
|
||||
* This iterates over all planes in an atomic update. Note that before the
|
||||
* software state is committed (by calling drm_atomic_helper_swap_state()), this
|
||||
* points to the new state, while afterwards it points to the old state. Due to
|
||||
* this tricky confusion this macro is deprecated.
|
||||
*/
|
||||
#if !defined(for_each_plane_in_state)
|
||||
#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \
|
||||
for ((__i) = 0; \
|
||||
(__i) < (__state)->dev->mode_config.num_total_plane && \
|
||||
((plane) = (__state)->planes[__i].ptr, \
|
||||
(plane_state) = (__state)->planes[__i].state, 1); \
|
||||
(__i)++) \
|
||||
for_each_if (plane_state)
|
||||
#else
|
||||
#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \
|
||||
for_each_plane_in_state(__state, plane, plane_state, __i)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* for_each_new_plane_in_state() was added by kernel commit
|
||||
* 581e49fe6b411f407102a7f2377648849e0fa37f which was Signed-off-by:
|
||||
* Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
|
||||
* Daniel Vetter <daniel.vetter@ffwll.ch>
|
||||
*
|
||||
* This commit also added the old_state and new_state pointers to
|
||||
* __drm_planes_state. Because of this, the best that can be done on kernel
|
||||
* versions without this macro is for_each_plane_in_state.
|
||||
*/
|
||||
|
||||
/**
|
||||
* nv_drm_for_each_new_plane_in_state - iterate over all planes in an atomic update
|
||||
* @__state: &struct drm_atomic_state pointer
|
||||
* @plane: &struct drm_plane iteration cursor
|
||||
* @new_plane_state: &struct drm_plane_state iteration cursor for the new state
|
||||
* @__i: int iteration cursor, for macro-internal use
|
||||
*
|
||||
* This iterates over all planes in an atomic update, tracking only the new
|
||||
* state. This is useful in enable functions, where we need the new state the
|
||||
* hardware should be in when the atomic commit operation has completed.
|
||||
*/
|
||||
#if !defined(for_each_new_plane_in_state)
|
||||
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
|
||||
nv_drm_for_each_plane_in_state(__state, plane, new_plane_state, __i)
|
||||
#else
|
||||
#define nv_drm_for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
|
||||
for_each_new_plane_in_state(__state, plane, new_plane_state, __i)
|
||||
#endif
|
||||
|
||||
#include <drm/drm_auth.h>
|
||||
#include <drm/drm_file.h>
|
||||
|
||||
/*
|
||||
* drm_file_get_master() added by commit 56f0729a510f ("drm: protect drm_master
|
||||
* pointers in drm_lease.c") in v5.15 (2021-07-20)
|
||||
*/
|
||||
static inline struct drm_master *nv_drm_file_get_master(struct drm_file *filep)
|
||||
{
|
||||
#if defined(NV_DRM_FILE_GET_MASTER_PRESENT)
|
||||
return drm_file_get_master(filep);
|
||||
#else
|
||||
if (filep->master) {
|
||||
return drm_master_get(filep->master);
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* drm_connector_for_each_possible_encoder() is added by commit
|
||||
* 83aefbb887b59df0b3520965c3701e01deacfc52 which was Signed-off-by:
|
||||
* Ville Syrjälä <ville.syrjala@linux.intel.com>
|
||||
*
|
||||
* drm_connector_for_each_possible_encoder() is copied from
|
||||
* include/drm/drm_connector.h @
|
||||
* 83aefbb887b59df0b3520965c3701e01deacfc52
|
||||
* which has the following copyright and license information:
|
||||
*
|
||||
* Copyright (c) 2016 Intel Corporation
|
||||
*
|
||||
* Permission to use, copy, modify, distribute, and sell this software and its
|
||||
* documentation for any purpose is hereby granted without fee, provided that
|
||||
* the above copyright notice appear in all copies and that both that copyright
|
||||
* notice and this permission notice appear in supporting documentation, and
|
||||
* that the name of the copyright holders not be used in advertising or
|
||||
* publicity pertaining to distribution of the software without specific,
|
||||
* written prior permission. The copyright holders make no representations
|
||||
* about the suitability of this software for any purpose. It is provided "as
|
||||
* is" without express or implied warranty.
|
||||
*
|
||||
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
|
||||
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
|
||||
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
|
||||
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
|
||||
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
||||
* OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <drm/drm_connector.h>
|
||||
|
||||
/**
|
||||
* nv_drm_connector_for_each_possible_encoder - iterate connector's possible
|
||||
* encoders
|
||||
* @connector: &struct drm_connector pointer
|
||||
* @encoder: &struct drm_encoder pointer used as cursor
|
||||
* @__i: int iteration cursor, for macro-internal use
|
||||
*/
|
||||
#if !defined(drm_connector_for_each_possible_encoder)
|
||||
|
||||
#if !defined(for_each_if)
|
||||
#define for_each_if(condition) if (!(condition)) {} else
|
||||
#endif
|
||||
|
||||
#define __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i) \
|
||||
for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
|
||||
(connector)->encoder_ids[(__i)] != 0; (__i)++) \
|
||||
for_each_if((encoder) = \
|
||||
drm_encoder_find((connector)->dev, NULL, \
|
||||
(connector)->encoder_ids[(__i)]))
|
||||
|
||||
|
||||
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
|
||||
{ \
|
||||
unsigned int __i; \
|
||||
__nv_drm_connector_for_each_possible_encoder(connector, encoder, __i)
|
||||
|
||||
#define nv_drm_connector_for_each_possible_encoder_end \
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#if NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT == 3
|
||||
|
||||
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
|
||||
{ \
|
||||
unsigned int __i; \
|
||||
drm_connector_for_each_possible_encoder(connector, encoder, __i)
|
||||
|
||||
#define nv_drm_connector_for_each_possible_encoder_end \
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \
|
||||
drm_connector_for_each_possible_encoder(connector, encoder)
|
||||
|
||||
#define nv_drm_connector_for_each_possible_encoder_end
|
||||
|
||||
#endif
|
||||
|
||||
#endif
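/*
 * Illustrative sketch (not part of this file): because the fallback variants
 * above open a scope that nv_drm_connector_for_each_possible_encoder_end must
 * close, callers always pair the two macros, as in this made-up example.
 */
static struct drm_encoder *example_pick_encoder(struct drm_connector *connector)
{
    struct drm_encoder *encoder;
    struct drm_encoder *found = NULL;

    nv_drm_connector_for_each_possible_encoder(connector, encoder) {
        found = encoder;
        break;
    }
    nv_drm_connector_for_each_possible_encoder_end;

    return found;
}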
|
||||
|
||||
static inline int
|
||||
nv_drm_connector_attach_encoder(struct drm_connector *connector,
|
||||
struct drm_encoder *encoder)
|
||||
{
|
||||
#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
|
||||
return drm_mode_connector_attach_encoder(connector, encoder);
|
||||
#else
|
||||
return drm_connector_attach_encoder(connector, encoder);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int
|
||||
nv_drm_connector_update_edid_property(struct drm_connector *connector,
|
||||
const struct edid *edid)
|
||||
{
|
||||
#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME)
|
||||
return drm_mode_connector_update_edid_property(connector, edid);
|
||||
#else
|
||||
return drm_connector_update_edid_property(connector, edid);
|
||||
#endif
|
||||
}
|
||||
|
||||
#include <drm/drm_fourcc.h>
|
||||
|
||||
static inline int nv_drm_format_num_planes(uint32_t format)
|
||||
{
|
||||
const struct drm_format_info *info = drm_format_info(format);
|
||||
return info != NULL ? info->num_planes : 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* DRM_FORMAT_MOD_VENDOR_NVIDIA was previously called
|
||||
* DRM_FORMAT_MOD_VENDOR_NV.
|
||||
*/
|
||||
#if !defined(DRM_FORMAT_MOD_VENDOR_NVIDIA)
|
||||
#define DRM_FORMAT_MOD_VENDOR_NVIDIA DRM_FORMAT_MOD_VENDOR_NV
|
||||
#endif
|
||||
|
||||
/*
|
||||
* DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D is a relatively new addition to the
|
||||
* upstream kernel headers compared to the other format modifiers.
|
||||
*/
|
||||
#if !defined(DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D)
|
||||
#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
|
||||
fourcc_mod_code(NVIDIA, (0x10 | \
|
||||
((h) & 0xf) | \
|
||||
(((k) & 0xff) << 12) | \
|
||||
(((g) & 0x3) << 20) | \
|
||||
(((s) & 0x1) << 22) | \
|
||||
(((c) & 0x7) << 23)))
|
||||
#endif
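/*
 * Illustrative sketch (not part of this file): userspace that has queried
 * DRM_IOCTL_NVIDIA_GET_DEV_INFO (struct drm_nvidia_get_dev_info_params in
 * nvidia-drm-ioctl.h) can plug the reported page-kind and sector-layout
 * values into the macro above to build a framebuffer modifier. The
 * compression field (0 here) and the log2 block height are allocator
 * choices; the mapping of those two arguments is an assumption in this
 * sketch.
 */
static inline uint64_t example_block_linear_modifier(
    const struct drm_nvidia_get_dev_info_params *info,
    unsigned int log2_block_height)
{
    return DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0 /* no compression */,
                                                 info->sector_layout,
                                                 info->page_kind_generation,
                                                 info->generic_page_kind,
                                                 log2_block_height);
}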
|
||||
|
||||
/*
|
||||
* DRM_UNLOCKED was removed with commit 2798ffcc1d6a ("drm: Remove locking for
|
||||
* legacy ioctls and DRM_UNLOCKED") in v6.8, but it was previously made
|
||||
* implicit for all non-legacy DRM driver IOCTLs since Linux v4.10 commit
|
||||
* fa5386459f06 "drm: Used DRM_LEGACY for all legacy functions" (Linux v4.4
|
||||
* commit ea487835e887 "drm: Enforce unlocked ioctl operation for kms driver
|
||||
* ioctls" previously did it only for drivers that set the DRM_MODESET flag), so
|
||||
* it was effectively a no-op anyway.
|
||||
*/
|
||||
#if !defined(NV_DRM_UNLOCKED_IOCTL_FLAG_PRESENT)
|
||||
#define DRM_UNLOCKED 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* struct drm_color_ctm_3x4 was added by commit 6872a189be50 ("drm/amd/display:
|
||||
* Add 3x4 CTM support for plane CTM") in v6.8. For backwards compatibility,
|
||||
* define it when not present.
|
||||
*/
|
||||
#if !defined(NV_DRM_COLOR_CTM_3X4_PRESENT)
|
||||
struct drm_color_ctm_3x4 {
|
||||
__u64 matrix[12];
|
||||
};
|
||||
#endif
|
||||
|
||||
#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */
|
||||
|
||||
#endif /* defined(NV_DRM_AVAILABLE) */
|
||||
|
||||
#endif /* __NVIDIA_DRM_HELPER_H__ */
kernel-open/nvidia-drm/nvidia-drm-ioctl.h (new file, 399 lines)
@@ -0,0 +1,399 @@
/*
|
||||
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _UAPI_NVIDIA_DRM_IOCTL_H_
|
||||
#define _UAPI_NVIDIA_DRM_IOCTL_H_
|
||||
|
||||
#include <drm/drm.h>
|
||||
|
||||
/*
|
||||
* We should do our best to keep these values constant. Any change to these will
|
||||
* be backwards incompatible with client applications that might be using them
|
||||
*/
|
||||
#define DRM_NVIDIA_GET_CRTC_CRC32 0x00
|
||||
#define DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY 0x01
|
||||
#define DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY 0x02
|
||||
#define DRM_NVIDIA_GET_DEV_INFO 0x03
|
||||
#define DRM_NVIDIA_FENCE_SUPPORTED 0x04
|
||||
#define DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE 0x05
|
||||
#define DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH 0x06
|
||||
#define DRM_NVIDIA_GET_CLIENT_CAPABILITY 0x08
|
||||
#define DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY 0x09
|
||||
#define DRM_NVIDIA_GEM_MAP_OFFSET 0x0a
|
||||
#define DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY 0x0b
|
||||
#define DRM_NVIDIA_GET_CRTC_CRC32_V2 0x0c
|
||||
#define DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY 0x0d
|
||||
#define DRM_NVIDIA_GEM_IDENTIFY_OBJECT 0x0e
|
||||
#define DRM_NVIDIA_DMABUF_SUPPORTED 0x0f
|
||||
#define DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID 0x10
|
||||
#define DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID 0x11
|
||||
#define DRM_NVIDIA_GRANT_PERMISSIONS 0x12
|
||||
#define DRM_NVIDIA_REVOKE_PERMISSIONS 0x13
|
||||
#define DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE 0x14
|
||||
#define DRM_NVIDIA_SEMSURF_FENCE_CREATE 0x15
|
||||
#define DRM_NVIDIA_SEMSURF_FENCE_WAIT 0x16
|
||||
#define DRM_NVIDIA_SEMSURF_FENCE_ATTACH 0x17
|
||||
#define DRM_NVIDIA_GET_DRM_FILE_UNIQUE_ID 0x18
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY), \
|
||||
struct drm_nvidia_gem_import_nvkms_memory_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY), \
|
||||
struct drm_nvidia_gem_import_userspace_memory_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GET_DEV_INFO \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DEV_INFO), \
|
||||
struct drm_nvidia_get_dev_info_params)
|
||||
|
||||
/*
|
||||
* XXX Solaris compiler has issues with DRM_IO. None of this is supported on
|
||||
* Solaris anyway, so just skip it.
|
||||
*
|
||||
* 'warning: suggest parentheses around arithmetic in operand of |'
|
||||
*/
|
||||
#if defined(NV_LINUX) || defined(NV_BSD)
|
||||
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED \
|
||||
DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED)
|
||||
#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED \
|
||||
DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_DMABUF_SUPPORTED)
|
||||
#else
|
||||
#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED 0
|
||||
#define DRM_IOCTL_NVIDIA_DMABUF_SUPPORTED 0
|
||||
#endif
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_PRIME_FENCE_CONTEXT_CREATE \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_PRIME_FENCE_CONTEXT_CREATE),\
|
||||
struct drm_nvidia_prime_fence_context_create_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GEM_PRIME_FENCE_ATTACH \
|
||||
DRM_IOW((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_PRIME_FENCE_ATTACH), \
|
||||
struct drm_nvidia_gem_prime_fence_attach_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CLIENT_CAPABILITY), \
|
||||
struct drm_nvidia_get_client_capability_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32 \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32), \
|
||||
struct drm_nvidia_get_crtc_crc32_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2 \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32_V2), \
|
||||
struct drm_nvidia_get_crtc_crc32_v2_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY), \
|
||||
struct drm_nvidia_gem_export_nvkms_memory_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_MAP_OFFSET), \
|
||||
struct drm_nvidia_gem_map_offset_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY), \
|
||||
struct drm_nvidia_gem_alloc_nvkms_memory_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY), \
|
||||
struct drm_nvidia_gem_export_dmabuf_memory_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GEM_IDENTIFY_OBJECT \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IDENTIFY_OBJECT), \
|
||||
struct drm_nvidia_gem_identify_object_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DPY_ID_FOR_CONNECTOR_ID),\
|
||||
struct drm_nvidia_get_dpy_id_for_connector_id_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CONNECTOR_ID_FOR_DPY_ID),\
|
||||
struct drm_nvidia_get_connector_id_for_dpy_id_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GRANT_PERMISSIONS \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GRANT_PERMISSIONS), \
|
||||
struct drm_nvidia_grant_permissions_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_REVOKE_PERMISSIONS \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_REVOKE_PERMISSIONS), \
|
||||
struct drm_nvidia_revoke_permissions_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CTX_CREATE \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + \
|
||||
DRM_NVIDIA_SEMSURF_FENCE_CTX_CREATE), \
|
||||
struct drm_nvidia_semsurf_fence_ctx_create_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_CREATE \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + \
|
||||
DRM_NVIDIA_SEMSURF_FENCE_CREATE), \
|
||||
struct drm_nvidia_semsurf_fence_create_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_WAIT \
|
||||
DRM_IOW((DRM_COMMAND_BASE + \
|
||||
DRM_NVIDIA_SEMSURF_FENCE_WAIT), \
|
||||
struct drm_nvidia_semsurf_fence_wait_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_SEMSURF_FENCE_ATTACH \
|
||||
DRM_IOW((DRM_COMMAND_BASE + \
|
||||
DRM_NVIDIA_SEMSURF_FENCE_ATTACH), \
|
||||
struct drm_nvidia_semsurf_fence_attach_params)
|
||||
|
||||
#define DRM_IOCTL_NVIDIA_GET_DRM_FILE_UNIQUE_ID \
|
||||
DRM_IOWR((DRM_COMMAND_BASE + \
|
||||
DRM_NVIDIA_GET_DRM_FILE_UNIQUE_ID), \
|
||||
struct drm_nvidia_get_drm_file_unique_id_params)
|
||||
|
||||
struct drm_nvidia_gem_import_nvkms_memory_params {
|
||||
uint64_t mem_size; /* IN */
|
||||
|
||||
uint64_t nvkms_params_ptr; /* IN */
|
||||
uint64_t nvkms_params_size; /* IN */
|
||||
|
||||
uint32_t handle; /* OUT */
|
||||
|
||||
uint32_t __pad;
|
||||
};
|
||||
|
||||
struct drm_nvidia_gem_import_userspace_memory_params {
|
||||
uint64_t size; /* IN Size of memory in bytes */
|
||||
uint64_t address; /* IN Virtual address of userspace memory */
|
||||
uint32_t handle; /* OUT Handle to gem object */
|
||||
};
|
||||
|
||||
struct drm_nvidia_get_dev_info_params {
|
||||
uint32_t gpu_id; /* OUT */
|
||||
uint32_t mig_device; /* OUT */
|
||||
uint32_t primary_index; /* OUT; the "card%d" value */
|
||||
|
||||
uint32_t supports_alloc; /* OUT */
|
||||
/* The generic_page_kind, page_kind_generation, and sector_layout
|
||||
* fields are only valid if supports_alloc is true.
|
||||
* See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these. */
|
||||
uint32_t generic_page_kind; /* OUT */
|
||||
uint32_t page_kind_generation; /* OUT */
|
||||
uint32_t sector_layout; /* OUT */
|
||||
uint32_t supports_sync_fd; /* OUT */
|
||||
uint32_t supports_semsurf; /* OUT */
|
||||
};
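/*
 * Illustrative sketch (not part of this header): querying the device
 * information from userspace with libdrm's drmIoctl() (<xf86drm.h>); memset()
 * and errno come from <string.h> and <errno.h>. All fields are outputs, so
 * the struct only needs to be zeroed before the call.
 */
static inline int example_get_dev_info(
    int drm_fd, struct drm_nvidia_get_dev_info_params *info)
{
    memset(info, 0, sizeof(*info));

    if (drmIoctl(drm_fd, DRM_IOCTL_NVIDIA_GET_DEV_INFO, info) != 0) {
        return -errno;
    }

    /* info->supports_alloc gates the block-linear fields, per the comment
     * in the struct above. */
    return 0;
}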
|
||||
|
||||
struct drm_nvidia_prime_fence_context_create_params {
|
||||
uint32_t handle; /* OUT GEM handle to fence context */
|
||||
|
||||
uint32_t index; /* IN Index of semaphore to use for fencing */
|
||||
uint64_t size; /* IN Size of semaphore surface in bytes */
|
||||
|
||||
/* Params for importing userspace semaphore surface */
|
||||
uint64_t import_mem_nvkms_params_ptr; /* IN */
|
||||
uint64_t import_mem_nvkms_params_size; /* IN */
|
||||
|
||||
/* Params for creating software signaling event */
|
||||
uint64_t event_nvkms_params_ptr; /* IN */
|
||||
uint64_t event_nvkms_params_size; /* IN */
|
||||
};
|
||||
|
||||
struct drm_nvidia_gem_prime_fence_attach_params {
|
||||
uint32_t handle; /* IN GEM handle to attach fence to */
|
||||
uint32_t fence_context_handle; /* IN GEM handle to fence context on which fence is run */
|
||||
uint32_t sem_thresh; /* IN Semaphore value to reach before signal */
|
||||
uint32_t __pad;
|
||||
};
|
||||
|
||||
struct drm_nvidia_get_client_capability_params {
|
||||
uint64_t capability; /* IN Client capability enum */
|
||||
uint64_t value; /* OUT Client capability value */
|
||||
};
|
||||
|
||||
/* Struct that stores a CRC value and whether it is supported by hardware */
|
||||
struct drm_nvidia_crtc_crc32 {
|
||||
uint32_t value; /* Read value, undefined if supported is false */
|
||||
uint8_t supported; /* Supported boolean, true if readable by hardware */
|
||||
uint8_t __pad0;
|
||||
uint16_t __pad1;
|
||||
};
|
||||
|
||||
struct drm_nvidia_crtc_crc32_v2_out {
|
||||
struct drm_nvidia_crtc_crc32 compositorCrc32; /* OUT compositor hardware CRC32 value */
|
||||
struct drm_nvidia_crtc_crc32 rasterGeneratorCrc32; /* OUT raster generator CRC32 value */
|
||||
struct drm_nvidia_crtc_crc32 outputCrc32; /* OUT SF/SOR CRC32 value */
|
||||
};
|
||||
|
||||
struct drm_nvidia_get_crtc_crc32_v2_params {
|
||||
uint32_t crtc_id; /* IN CRTC identifier */
|
||||
struct drm_nvidia_crtc_crc32_v2_out crc32; /* OUT Crc32 output structure */
|
||||
};
|
||||
|
||||
struct drm_nvidia_get_crtc_crc32_params {
|
||||
uint32_t crtc_id; /* IN CRTC identifier */
|
||||
uint32_t crc32; /* OUT CRC32 value */
|
||||
};
|
||||
|
||||
struct drm_nvidia_gem_export_nvkms_memory_params {
|
||||
uint32_t handle; /* IN */
|
||||
uint32_t __pad;
|
||||
|
||||
uint64_t nvkms_params_ptr; /* IN */
|
||||
uint64_t nvkms_params_size; /* IN */
|
||||
};
|
||||
|
||||
struct drm_nvidia_gem_map_offset_params {
|
||||
uint32_t handle; /* IN Handle to gem object */
|
||||
uint32_t __pad;
|
||||
|
||||
uint64_t offset; /* OUT Fake offset */
|
||||
};
|
||||
|
||||
#define NV_GEM_ALLOC_NO_SCANOUT (1 << 0)
|
||||
|
||||
struct drm_nvidia_gem_alloc_nvkms_memory_params {
|
||||
uint32_t handle; /* OUT */
|
||||
uint8_t block_linear; /* IN */
|
||||
uint8_t compressible; /* IN/OUT */
|
||||
uint16_t __pad0;
|
||||
|
||||
uint64_t memory_size; /* IN */
|
||||
uint32_t flags; /* IN */
|
||||
uint32_t __pad1;
|
||||
};
|
||||
|
||||
struct drm_nvidia_gem_export_dmabuf_memory_params {
|
||||
uint32_t handle; /* IN GEM Handle*/
|
||||
uint32_t __pad;
|
||||
|
||||
uint64_t nvkms_params_ptr; /* IN */
|
||||
uint64_t nvkms_params_size; /* IN */
|
||||
};
|
||||
|
||||
typedef enum {
|
||||
NV_GEM_OBJECT_NVKMS,
|
||||
NV_GEM_OBJECT_DMABUF,
|
||||
NV_GEM_OBJECT_USERMEMORY,
|
||||
|
||||
NV_GEM_OBJECT_UNKNOWN = 0x7fffffff /* Force size of 32-bits. */
|
||||
} drm_nvidia_gem_object_type;
|
||||
|
||||
struct drm_nvidia_gem_identify_object_params {
|
||||
uint32_t handle; /* IN GEM handle*/
|
||||
drm_nvidia_gem_object_type object_type; /* OUT GEM object type */
|
||||
};
|
||||
|
||||
struct drm_nvidia_get_dpy_id_for_connector_id_params {
|
||||
uint32_t connectorId; /* IN */
|
||||
uint32_t dpyId; /* OUT */
|
||||
};
|
||||
|
||||
struct drm_nvidia_get_connector_id_for_dpy_id_params {
|
||||
uint32_t dpyId; /* IN */
|
||||
uint32_t connectorId; /* OUT */
|
||||
};
|
||||
|
||||
enum drm_nvidia_permissions_type {
|
||||
NV_DRM_PERMISSIONS_TYPE_MODESET = 2,
|
||||
NV_DRM_PERMISSIONS_TYPE_SUB_OWNER = 3
|
||||
};
|
||||
|
||||
struct drm_nvidia_grant_permissions_params {
|
||||
int32_t fd; /* IN */
|
||||
uint32_t dpyId; /* IN */
|
||||
uint32_t type; /* IN */
|
||||
};
|
||||
|
||||
struct drm_nvidia_revoke_permissions_params {
|
||||
uint32_t dpyId; /* IN */
|
||||
uint32_t type; /* IN */
|
||||
};
|
||||
|
||||
struct drm_nvidia_semsurf_fence_ctx_create_params {
|
||||
uint64_t index; /* IN Index of the desired semaphore in the
|
||||
* fence context's semaphore surface */
|
||||
|
||||
/* Params for importing userspace semaphore surface */
|
||||
uint64_t nvkms_params_ptr; /* IN */
|
||||
uint64_t nvkms_params_size; /* IN */
|
||||
|
||||
uint32_t handle; /* OUT GEM handle to fence context */
|
||||
uint32_t __pad;
|
||||
};
|
||||
|
||||
struct drm_nvidia_semsurf_fence_create_params {
|
||||
uint32_t fence_context_handle; /* IN GEM handle to fence context on which
|
||||
* fence is run */
|
||||
|
||||
uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
|
||||
* after which the fence will be signaled
|
||||
* with its error status set to -ETIMEDOUT.
|
||||
* Default timeout value is 5000ms */
|
||||
|
||||
uint64_t wait_value; /* IN Semaphore value to reach before signal */
|
||||
|
||||
int32_t fd; /* OUT sync FD object representing the
|
||||
* semaphore at the specified index reaching
|
||||
* a value >= wait_value */
|
||||
uint32_t __pad;
|
||||
};
|
||||
|
||||
/*
|
||||
* Note there is no provision for timeouts in this ioctl. The kernel
|
||||
* documentation asserts timeouts should be handled by fence producers, and
|
||||
* that waiters should not second-guess their logic, as it is producers rather
|
||||
* than consumers that have better information when it comes to determining a
|
||||
* reasonable timeout for a given workload.
|
||||
*/
|
||||
struct drm_nvidia_semsurf_fence_wait_params {
|
||||
uint32_t fence_context_handle; /* IN GEM handle to fence context which will
|
||||
* be used to wait on the sync FD. Need not
|
||||
* be the fence context used to create the
|
||||
* sync FD. */
|
||||
|
||||
int32_t fd; /* IN sync FD object to wait on */
|
||||
|
||||
uint64_t pre_wait_value; /* IN Wait for the semaphore represented by
|
||||
* fence_context to reach this value before
|
||||
* waiting for the sync file. */
|
||||
|
||||
uint64_t post_wait_value; /* IN Signal the semaphore represented by
|
||||
* fence_context to this value after waiting
|
||||
* for the sync file */
|
||||
};
|
||||
|
||||
struct drm_nvidia_semsurf_fence_attach_params {
|
||||
uint32_t handle; /* IN GEM handle of buffer */
|
||||
|
||||
uint32_t fence_context_handle; /* IN GEM handle of fence context */
|
||||
|
||||
uint32_t timeout_value_ms; /* IN Timeout value in ms for the fence
|
||||
* after which the fence will be signaled
|
||||
* with its error status set to -ETIMEDOUT.
|
||||
* Default timeout value is 5000ms */
|
||||
|
||||
uint32_t shared; /* IN If true, fence will reserve shared
|
||||
* access to the buffer, otherwise it will
|
||||
* reserve exclusive access */
|
||||
|
||||
uint64_t wait_value; /* IN Semaphore value to reach before signal */
|
||||
};
|
||||
|
||||
struct drm_nvidia_get_drm_file_unique_id_params {
|
||||
uint64_t id; /* OUT Unique ID of the DRM file */
|
||||
};
|
||||
|
||||
#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */
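For orientation, the sketch below shows how a userspace client might issue one of the ioctls defined in this header. It is illustrative only: the include path, the /dev/dri/card0 node, and the use of libdrm's drmIoctl() wrapper are assumptions, and error handling is minimal.

/* Hypothetical userspace caller, sketched against the definitions above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>            /* drmIoctl() from libdrm (assumed available) */
#include "nvidia-drm-ioctl.h"   /* the header shown above */

int main(void)
{
    struct drm_nvidia_get_drm_file_unique_id_params params = { 0 };
    int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);  /* device node is illustrative */

    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* drmIoctl() retries the ioctl if it is interrupted by a signal. */
    if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GET_DRM_FILE_UNIQUE_ID, &params) == 0) {
        printf("DRM file unique id: %llu\n", (unsigned long long)params.id);
    } else {
        perror("DRM_IOCTL_NVIDIA_GET_DRM_FILE_UNIQUE_ID");
    }

    close(fd);
    return 0;
}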
|
||||
66
kernel-open/nvidia-drm/nvidia-drm-linux.c
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2023, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
|
||||
#include "nvidia-drm-os-interface.h"
|
||||
#include "nvidia-drm.h"
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
MODULE_PARM_DESC(
|
||||
modeset,
|
||||
"Enable atomic kernel modesetting (1 = enable, 0 = disable (default))");
|
||||
module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
|
||||
|
||||
#if defined(NV_DRM_FBDEV_AVAILABLE)
|
||||
MODULE_PARM_DESC(
|
||||
fbdev,
|
||||
"Create a framebuffer device (1 = enable (default), 0 = disable)");
|
||||
module_param_named(fbdev, nv_drm_fbdev_module_param, bool, 0400);
|
||||
#endif
|
||||
|
||||
#endif /* NV_DRM_AVAILABLE */
|
||||
|
||||
/*************************************************************************
|
||||
* Linux loading support code.
|
||||
*************************************************************************/
|
||||
|
||||
static int __init nv_linux_drm_init(void)
|
||||
{
|
||||
return nv_drm_init();
|
||||
}
|
||||
|
||||
static void __exit nv_linux_drm_exit(void)
|
||||
{
|
||||
nv_drm_exit();
|
||||
}
|
||||
|
||||
module_init(nv_linux_drm_init);
|
||||
module_exit(nv_linux_drm_exit);
|
||||
|
||||
MODULE_LICENSE("Dual MIT/GPL");
|
||||
|
||||
MODULE_INFO(supported, "external");
|
||||
MODULE_VERSION(NV_VERSION_STRING);
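As a usage note, both module parameters declared above are read at load time (permissions 0400), so they are normally passed on the modprobe command line, for example "modprobe nvidia-drm modeset=1 fbdev=1", or set persistently through a modprobe.d line such as "options nvidia-drm modeset=1".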
|
||||
865
kernel-open/nvidia-drm/nvidia-drm-modeset.c
Normal file
@@ -0,0 +1,865 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#include "nvidia-drm-priv.h"
|
||||
#include "nvidia-drm-modeset.h"
|
||||
#include "nvidia-drm-crtc.h"
|
||||
#include "nvidia-drm-os-interface.h"
|
||||
#include "nvidia-drm-helper.h"
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
#include <drm/drm_vblank.h>
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_crtc.h>
|
||||
|
||||
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
|
||||
#include <linux/nvhost.h>
|
||||
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
|
||||
#include <linux/host1x-next.h>
|
||||
#endif
|
||||
|
||||
#include <linux/dma-fence.h>
|
||||
|
||||
struct nv_drm_atomic_state {
|
||||
struct NvKmsKapiRequestedModeSetConfig config;
|
||||
struct drm_atomic_state base;
|
||||
};
|
||||
|
||||
static inline struct nv_drm_atomic_state *to_nv_atomic_state(
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
return container_of(state, struct nv_drm_atomic_state, base);
|
||||
}
|
||||
|
||||
struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev)
|
||||
{
|
||||
struct nv_drm_atomic_state *nv_state =
|
||||
nv_drm_calloc(1, sizeof(*nv_state));
|
||||
|
||||
if (nv_state == NULL || drm_atomic_state_init(dev, &nv_state->base) < 0) {
|
||||
nv_drm_free(nv_state);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return &nv_state->base;
|
||||
}
|
||||
|
||||
void nv_drm_atomic_state_clear(struct drm_atomic_state *state)
|
||||
{
|
||||
drm_atomic_state_default_clear(state);
|
||||
}
|
||||
|
||||
void nv_drm_atomic_state_free(struct drm_atomic_state *state)
|
||||
{
|
||||
struct nv_drm_atomic_state *nv_state =
|
||||
to_nv_atomic_state(state);
|
||||
drm_atomic_state_default_release(state);
|
||||
nv_drm_free(nv_state);
|
||||
}
|
||||
|
||||
/**
|
||||
* __will_generate_flip_event - Check whether event is going to be generated by
|
||||
* hardware when it flips from old crtc/plane state to current one. This
|
||||
* function is called after drm_atomic_helper_swap_state(), therefore new state
|
||||
* is swapped into current state.
|
||||
*/
|
||||
static bool __will_generate_flip_event(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *old_crtc_state)
|
||||
{
|
||||
struct drm_crtc_state *new_crtc_state = crtc->state;
|
||||
struct nv_drm_crtc_state *nv_new_crtc_state =
|
||||
to_nv_crtc_state(new_crtc_state);
|
||||
struct drm_plane_state *old_plane_state = NULL;
|
||||
struct drm_plane *plane = NULL;
|
||||
int i;
|
||||
|
||||
if (!old_crtc_state->active && !new_crtc_state->active) {
|
||||
/*
|
||||
* crtc is not active in old and new states therefore all planes are
|
||||
* disabled, hardware can not generate flip events.
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Find out whether primary & overlay flip done events will be generated. */
|
||||
nv_drm_for_each_plane_in_state(old_crtc_state->state,
|
||||
plane, old_plane_state, i) {
|
||||
if (old_plane_state->crtc != crtc) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (plane->type == DRM_PLANE_TYPE_CURSOR) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Hardware generates flip event for only those
|
||||
* planes which were active previously.
|
||||
*/
|
||||
if (old_crtc_state->active && old_plane_state->fb != NULL) {
|
||||
nv_new_crtc_state->nv_flip->pending_events++;
|
||||
}
|
||||
}
|
||||
|
||||
return nv_new_crtc_state->nv_flip->pending_events != 0;
|
||||
}
|
||||
|
||||
static int __nv_drm_put_back_post_fence_fd(
|
||||
struct nv_drm_plane_state *plane_state,
|
||||
const struct NvKmsKapiLayerReplyConfig *layer_reply_config)
|
||||
{
|
||||
int fd = layer_reply_config->postSyncptFd;
|
||||
int ret = 0;
|
||||
|
||||
if ((fd >= 0) && (plane_state->fd_user_ptr != NULL)) {
|
||||
ret = copy_to_user(plane_state->fd_user_ptr, &fd, sizeof(fd));
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*! Set back to NULL and let set_property specify it again */
|
||||
plane_state->fd_user_ptr = NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct nv_drm_plane_fence_cb_data {
|
||||
struct dma_fence_cb dma_fence_cb;
|
||||
struct nv_drm_device *nv_dev;
|
||||
NvU32 semaphore_index;
|
||||
};
|
||||
|
||||
static void
|
||||
__nv_drm_plane_fence_cb(
|
||||
struct dma_fence *fence,
|
||||
struct dma_fence_cb *cb_data
|
||||
)
|
||||
{
|
||||
struct nv_drm_plane_fence_cb_data *fence_data =
|
||||
container_of(cb_data, typeof(*fence_data), dma_fence_cb);
|
||||
struct nv_drm_device *nv_dev = fence_data->nv_dev;
|
||||
|
||||
dma_fence_put(fence);
|
||||
nvKms->signalDisplaySemaphore(nv_dev->pDevice, fence_data->semaphore_index);
|
||||
nv_drm_free(fence_data);
|
||||
}
|
||||
|
||||
static int __nv_drm_convert_in_fences(
|
||||
struct nv_drm_device *nv_dev,
|
||||
struct drm_atomic_state *state,
|
||||
struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_plane *plane = NULL;
|
||||
struct drm_plane_state *plane_state = NULL;
|
||||
struct nv_drm_plane *nv_plane = NULL;
|
||||
struct NvKmsKapiLayerRequestedConfig *plane_req_config = NULL;
|
||||
struct NvKmsKapiHeadRequestedConfig *head_req_config =
|
||||
&to_nv_crtc_state(crtc_state)->req_config;
|
||||
struct nv_drm_plane_fence_cb_data *fence_data;
|
||||
uint32_t semaphore_index;
|
||||
uint32_t idx_count;
|
||||
int ret, i;
|
||||
|
||||
if (!crtc_state->active) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
nv_drm_for_each_new_plane_in_state(state, plane, plane_state, i) {
|
||||
if ((plane->type == DRM_PLANE_TYPE_CURSOR) ||
|
||||
(plane_state->crtc != crtc) ||
|
||||
(plane_state->fence == NULL)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
nv_plane = to_nv_plane(plane);
|
||||
plane_req_config =
|
||||
&head_req_config->layerRequestedConfig[nv_plane->layer_idx];
|
||||
|
||||
if (nv_dev->supportsSyncpts) {
|
||||
#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST)
|
||||
#if defined(NV_NVHOST_DMA_FENCE_UNPACK_PRESENT)
|
||||
int ret =
|
||||
nvhost_dma_fence_unpack(
|
||||
plane_state->fence,
|
||||
&plane_req_config->config.syncParams.u.syncpt.preSyncptId,
|
||||
&plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
|
||||
if (ret == 0) {
|
||||
plane_req_config->config.syncParams.preSyncptSpecified = true;
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
#elif defined(NV_LINUX_HOST1X_NEXT_H_PRESENT)
|
||||
int ret =
|
||||
host1x_fence_extract(
|
||||
plane_state->fence,
|
||||
&plane_req_config->config.syncParams.u.syncpt.preSyncptId,
|
||||
&plane_req_config->config.syncParams.u.syncpt.preSyncptValue);
|
||||
if (ret == 0) {
|
||||
plane_req_config->config.syncParams.preSyncptSpecified = true;
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Syncpt extraction failed, or syncpts are not supported.
|
||||
* Use general DRM fence support with semaphores instead.
|
||||
*/
|
||||
if (plane_req_config->config.syncParams.postSyncptRequested) {
|
||||
// Can't mix Syncpts and semaphores in a given request.
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (idx_count = 0; idx_count < nv_dev->display_semaphores.count; idx_count++) {
|
||||
semaphore_index = nv_drm_next_display_semaphore(nv_dev);
|
||||
if (nvKms->tryInitDisplaySemaphore(nv_dev->pDevice, semaphore_index)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (idx_count == nv_dev->display_semaphores.count) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to initialize semaphore for plane fence");
|
||||
/*
|
||||
* This should only happen if the semaphore pool was somehow
|
||||
* exhausted. Waiting a bit and retrying may help in that case.
|
||||
*/
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
plane_req_config->config.syncParams.semaphoreSpecified = true;
|
||||
plane_req_config->config.syncParams.u.semaphore.index = semaphore_index;
|
||||
|
||||
fence_data = nv_drm_calloc(1, sizeof(*fence_data));
|
||||
|
||||
if (!fence_data) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to allocate callback data for plane fence");
|
||||
nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
fence_data->nv_dev = nv_dev;
|
||||
fence_data->semaphore_index = semaphore_index;
|
||||
|
||||
ret = dma_fence_add_callback(plane_state->fence,
|
||||
&fence_data->dma_fence_cb,
|
||||
__nv_drm_plane_fence_cb);
|
||||
|
||||
switch (ret) {
|
||||
case -ENOENT:
|
||||
/* The fence is already signaled */
|
||||
__nv_drm_plane_fence_cb(plane_state->fence,
|
||||
&fence_data->dma_fence_cb);
|
||||
#if defined(fallthrough)
|
||||
fallthrough;
|
||||
#else
|
||||
/* Fallthrough */
|
||||
#endif
|
||||
case 0:
|
||||
/*
|
||||
* The plane state's fence reference has either been consumed or
|
||||
* belongs to the outstanding callback now.
|
||||
*/
|
||||
plane_state->fence = NULL;
|
||||
break;
|
||||
default:
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed plane fence callback registration");
|
||||
/* Fence callback registration failed */
|
||||
nvKms->cancelDisplaySemaphore(nv_dev->pDevice, semaphore_index);
|
||||
nv_drm_free(fence_data);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __nv_drm_get_syncpt_data(
|
||||
struct nv_drm_device *nv_dev,
|
||||
struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *old_crtc_state,
|
||||
struct NvKmsKapiRequestedModeSetConfig *requested_config,
|
||||
struct NvKmsKapiModeSetReplyConfig *reply_config)
|
||||
{
|
||||
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
|
||||
struct NvKmsKapiHeadReplyConfig *head_reply_config;
|
||||
struct nv_drm_plane_state *plane_state;
|
||||
struct drm_crtc_state *new_crtc_state = crtc->state;
|
||||
struct drm_plane_state *old_plane_state = NULL;
|
||||
struct drm_plane_state *new_plane_state = NULL;
|
||||
struct drm_plane *plane = NULL;
|
||||
int i, ret;
|
||||
|
||||
if (!old_crtc_state->active && !new_crtc_state->active) {
|
||||
/*
|
||||
* crtc is not active in old and new states therefore all planes are
|
||||
* disabled, exit early.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
head_reply_config = &reply_config->headReplyConfig[nv_crtc->head];
|
||||
|
||||
nv_drm_for_each_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) {
|
||||
struct nv_drm_plane *nv_plane = to_nv_plane(plane);
|
||||
|
||||
if (plane->type == DRM_PLANE_TYPE_CURSOR || old_plane_state->crtc != crtc) {
|
||||
continue;
|
||||
}
|
||||
|
||||
new_plane_state = plane->state;
|
||||
|
||||
if (new_plane_state->crtc != crtc) {
|
||||
continue;
|
||||
}
|
||||
|
||||
plane_state = to_nv_drm_plane_state(new_plane_state);
|
||||
|
||||
ret = __nv_drm_put_back_post_fence_fd(
|
||||
plane_state,
|
||||
&head_reply_config->layerReplyConfig[nv_plane->layer_idx]);
|
||||
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* nv_drm_atomic_apply_modeset_config - validate/commit modeset config
|
||||
* @dev: DRM device
|
||||
* @state: atomic state tracking atomic update
|
||||
* @commit: commit/check modeset config associated with atomic update
|
||||
*
|
||||
* @state tracks atomic update and modeset objects affected
|
||||
* by the atomic update, but the state of the modeset objects it contains
|
||||
* depends on the current stage of the update.
|
||||
* At the commit stage, the proposed state is already stored in the current
|
||||
* state, and @state contains old state for all affected modeset objects.
|
||||
* At the check/validation stage, @state contains the proposed state for
|
||||
* all affected objects.
|
||||
*
|
||||
* Sequence of atomic update -
|
||||
* 1. The check/validation of proposed atomic state,
|
||||
* 2. Do any other steps that might fail,
|
||||
* 3. Put the proposed state into the current state pointers,
|
||||
* 4. Actually commit the hardware state,
|
||||
* 5. Cleanup old state.
|
||||
*
|
||||
* The function nv_drm_atomic_apply_modeset_config() is called
|
||||
* at stages (1) and (4) after drm_atomic_helper_swap_state().
|
||||
*/
|
||||
static int
|
||||
nv_drm_atomic_apply_modeset_config(struct drm_device *dev,
|
||||
struct drm_atomic_state *state,
|
||||
bool commit)
|
||||
{
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct NvKmsKapiRequestedModeSetConfig *requested_config =
|
||||
&(to_nv_atomic_state(state)->config);
|
||||
struct NvKmsKapiModeSetReplyConfig reply_config = { };
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* If sub-owner permission was granted to another NVKMS client, disallow
|
||||
* modesets through the DRM interface.
|
||||
*/
|
||||
if (nv_dev->subOwnershipGranted) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (commit) {
|
||||
/*
|
||||
* This function does what is necessary to prepare the framebuffers
|
||||
* attached to each new plane in the state for scan out, mostly by
|
||||
* calling back into driver callbacks the NVIDIA driver does not
|
||||
* provide. The end result is that all it does on the NVIDIA driver
|
||||
* is populate the plane state's dma fence pointers with any implicit
|
||||
* sync fences attached to the GEM objects associated with those planes
|
||||
* in the new state, preferring explicit sync fences when appropriate.
|
||||
* This must be done prior to converting the per-plane fences to
|
||||
* semaphore waits below.
|
||||
*/
|
||||
ret = drm_atomic_helper_prepare_planes(dev, state);
|
||||
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
memset(requested_config, 0, sizeof(*requested_config));
|
||||
|
||||
/* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */
|
||||
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
|
||||
/*
|
||||
* When committing a state, the new state is already stored in
|
||||
* crtc->state. When checking a proposed state, the proposed state is
|
||||
* stored in crtc_state.
|
||||
*/
|
||||
struct drm_crtc_state *new_crtc_state =
|
||||
commit ? crtc->state : crtc_state;
|
||||
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
|
||||
|
||||
if (commit) {
|
||||
struct drm_crtc_state *old_crtc_state = crtc_state;
|
||||
struct nv_drm_crtc_state *nv_new_crtc_state =
|
||||
to_nv_crtc_state(new_crtc_state);
|
||||
|
||||
nv_new_crtc_state->nv_flip->event = new_crtc_state->event;
|
||||
nv_new_crtc_state->nv_flip->pending_events = 0;
|
||||
new_crtc_state->event = NULL;
|
||||
|
||||
/*
|
||||
* If flip event will be generated by hardware
|
||||
* then defer flip object processing to flip event from hardware.
|
||||
*/
|
||||
if (__will_generate_flip_event(crtc, old_crtc_state)) {
|
||||
nv_drm_crtc_enqueue_flip(nv_crtc,
|
||||
nv_new_crtc_state->nv_flip);
|
||||
|
||||
nv_new_crtc_state->nv_flip = NULL;
|
||||
}
|
||||
|
||||
ret = __nv_drm_convert_in_fences(nv_dev,
|
||||
state,
|
||||
crtc,
|
||||
new_crtc_state);
|
||||
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Do this deep copy after calling __nv_drm_convert_in_fences,
|
||||
* which modifies the new CRTC state's req_config member
|
||||
*/
|
||||
requested_config->headRequestedConfig[nv_crtc->head] =
|
||||
to_nv_crtc_state(new_crtc_state)->req_config;
|
||||
|
||||
requested_config->headsMask |= 1 << nv_crtc->head;
|
||||
}
|
||||
|
||||
if (commit && nvKms->systemInfo.bAllowWriteCombining) {
|
||||
/*
|
||||
* XXX This call is required only if dumb buffer is going
|
||||
* to be presented.
|
||||
*/
|
||||
nv_drm_write_combine_flush();
|
||||
}
|
||||
|
||||
if (!nvKms->applyModeSetConfig(nv_dev->pDevice,
|
||||
requested_config,
|
||||
&reply_config,
|
||||
commit)) {
|
||||
if (commit || reply_config.flipResult != NV_KMS_FLIP_RESULT_IN_PROGRESS) {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (commit && nv_dev->supportsSyncpts) {
|
||||
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
|
||||
/*! loop over affected crtcs and get NvKmsKapiModeSetReplyConfig */
|
||||
ret = __nv_drm_get_syncpt_data(
|
||||
nv_dev, crtc, crtc_state, requested_config, &reply_config);
|
||||
if (ret != 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (commit && nv_dev->requiresVrrSemaphores && reply_config.vrrFlip) {
|
||||
nvKms->signalVrrSemaphore(nv_dev->pDevice, reply_config.vrrSemaphoreIndex);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int nv_drm_atomic_check(struct drm_device *dev,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_crtc_state *crtc_state;
|
||||
int i;
|
||||
|
||||
struct drm_plane *plane;
|
||||
struct drm_plane_state *plane_state;
|
||||
int j;
|
||||
bool cursor_surface_changed;
|
||||
bool cursor_only_commit;
|
||||
|
||||
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
|
||||
|
||||
/*
|
||||
* Committing cursor surface change without any other plane change can
|
||||
* cause cursor surface in use by HW to be freed prematurely. Add all
|
||||
* planes to the commit to avoid this. This is a workaround for bug 4966645.
|
||||
*/
|
||||
cursor_surface_changed = false;
|
||||
cursor_only_commit = true;
|
||||
nv_drm_for_each_plane_in_state(crtc_state->state, plane, plane_state, j) {
|
||||
if (plane->type == DRM_PLANE_TYPE_CURSOR) {
|
||||
if (plane_state->fb != plane->state->fb) {
|
||||
cursor_surface_changed = true;
|
||||
}
|
||||
} else {
|
||||
cursor_only_commit = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* if the color management changed on the crtc, we need to update the
|
||||
* crtc's plane's CSC matrices, so add the crtc's planes to the commit
|
||||
*/
|
||||
if (crtc_state->color_mgmt_changed ||
|
||||
(cursor_surface_changed && cursor_only_commit)) {
|
||||
if ((ret = drm_atomic_add_affected_planes(state, crtc)) != 0) {
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ((ret = drm_atomic_helper_check(dev, state)) != 0) {
|
||||
goto done;
|
||||
}
|
||||
|
||||
ret = nv_drm_atomic_apply_modeset_config(dev,
|
||||
state, false /* commit */);
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* __nv_drm_handle_flip_event - handle flip occurred event
|
||||
* @nv_crtc: crtc on which flip has been occurred
|
||||
*
|
||||
* This handler dequeues the first nv_drm_flip from the crtc's flip_list,
|
||||
* generates an event if requested at flip time, and frees the nv_drm_flip.
|
||||
*/
|
||||
static void __nv_drm_handle_flip_event(struct nv_drm_crtc *nv_crtc)
|
||||
{
|
||||
struct drm_device *dev = nv_crtc->base.dev;
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
struct nv_drm_flip *nv_flip;
|
||||
|
||||
/*
|
||||
* Acquire event_lock before nv_flip object dequeue, otherwise immediate
|
||||
* flip event delivery from nv_drm_atomic_commit() races ahead and
|
||||
* messes up with event delivery order.
|
||||
*/
|
||||
spin_lock(&dev->event_lock);
|
||||
nv_flip = nv_drm_crtc_dequeue_flip(nv_crtc);
|
||||
if (likely(nv_flip != NULL)) {
|
||||
struct nv_drm_flip *nv_deferred_flip, *nv_next_deferred_flip;
|
||||
|
||||
if (nv_flip->event != NULL) {
|
||||
drm_crtc_send_vblank_event(&nv_crtc->base, nv_flip->event);
|
||||
}
|
||||
|
||||
/*
|
||||
* Process flips that were deferred until processing of this nv_flip
|
||||
* object.
|
||||
*/
|
||||
list_for_each_entry_safe(nv_deferred_flip,
|
||||
nv_next_deferred_flip,
|
||||
&nv_flip->deferred_flip_list, list_entry) {
|
||||
|
||||
if (nv_deferred_flip->event != NULL) {
|
||||
drm_crtc_send_vblank_event(&nv_crtc->base,
|
||||
nv_deferred_flip->event);
|
||||
}
|
||||
list_del(&nv_deferred_flip->list_entry);
|
||||
|
||||
nv_drm_free(nv_deferred_flip);
|
||||
}
|
||||
}
|
||||
spin_unlock(&dev->event_lock);
|
||||
|
||||
wake_up_all(&nv_dev->flip_event_wq);
|
||||
|
||||
nv_drm_free(nv_flip);
|
||||
}
|
||||
|
||||
int nv_drm_atomic_commit(struct drm_device *dev,
|
||||
struct drm_atomic_state *state,
|
||||
bool nonblock)
|
||||
{
|
||||
int ret = -EBUSY;
|
||||
|
||||
int i;
|
||||
struct drm_crtc *crtc = NULL;
|
||||
struct drm_crtc_state *crtc_state = NULL;
|
||||
struct nv_drm_device *nv_dev = to_nv_device(dev);
|
||||
|
||||
/*
|
||||
* XXX: drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY
|
||||
* for nonblocking commit if the commit would need to wait for previous
|
||||
* updates (commit tasks/flip event) to complete. In case of blocking
|
||||
* commits it mandates to wait for previous updates to complete. However,
|
||||
* the kernel DRM-KMS documentation does explicitly allow maintaining a
|
||||
* queue of outstanding commits.
|
||||
*
|
||||
* Our system already implements such a queue, but due to
|
||||
* bug 4054608, it is currently not used.
|
||||
*/
|
||||
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
|
||||
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
|
||||
|
||||
/*
|
||||
* Here you aren't required to hold nv_drm_crtc::flip_list_lock
|
||||
* because:
|
||||
*
|
||||
* The core DRM driver acquires lock for all affected crtcs before
|
||||
* calling into ->commit() hook, therefore it is not possible for
|
||||
* other threads to call into ->commit() hook affecting same crtcs
|
||||
* and enqueue flip objects into flip_list -
|
||||
*
|
||||
* nv_drm_atomic_commit_internal()
|
||||
* |-> nv_drm_atomic_apply_modeset_config(commit=true)
|
||||
* |-> nv_drm_crtc_enqueue_flip()
|
||||
*
|
||||
* Only possibility is list_empty check races with code path
|
||||
* dequeuing flip object -
|
||||
*
|
||||
* __nv_drm_handle_flip_event()
|
||||
* |-> nv_drm_crtc_dequeue_flip()
|
||||
*
|
||||
* But this race condition can't cause list_empty() to return an
* incorrect result. nv_drm_crtc_dequeue_flip() in the middle of
|
||||
* updating the list could not trick us into thinking the list is
|
||||
* empty when it isn't.
|
||||
*/
|
||||
if (nonblock) {
|
||||
if (!list_empty(&nv_crtc->flip_list)) {
|
||||
return -EBUSY;
|
||||
}
|
||||
} else {
|
||||
if (wait_event_timeout(
|
||||
nv_dev->flip_event_wq,
|
||||
list_empty(&nv_crtc->flip_list),
|
||||
3 * HZ /* 3 second */) == 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Flip event timeout on head %u", nv_crtc->head);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* If the legacy LUT needs to be updated, ensure that the previous LUT
|
||||
* update is complete first.
|
||||
*/
|
||||
if (crtc_state->color_mgmt_changed) {
|
||||
NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
|
||||
nv_crtc->head,
|
||||
!nonblock /* waitForCompletion */);
|
||||
|
||||
/* If checking the LUT notifier failed, assume no LUT notifier is set. */
|
||||
if (!complete) {
|
||||
if (nonblock) {
|
||||
return -EBUSY;
|
||||
} else {
|
||||
/*
|
||||
* checkLutNotifier should wait on the notifier in this
|
||||
* case, so we should only get here if the wait timed out.
|
||||
*/
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"LUT notifier timeout on head %u", nv_crtc->head);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* nv_drm_atomic_commit_internal()
|
||||
* implements blocking/non-blocking atomic commit using
|
||||
* nv_drm_crtc::flip_list, it does not require any help from core DRM
|
||||
* helper functions to stall commit processing. Therefore passing false to
|
||||
* 'stall' parameter.
|
||||
* In this context, failure from drm_atomic_helper_swap_state() is not
|
||||
* expected.
|
||||
*/
|
||||
|
||||
ret = drm_atomic_helper_swap_state(state, false /* stall */);
|
||||
if (WARN_ON(ret != 0)) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* nv_drm_atomic_commit_internal() must not return failure after
|
||||
* calling drm_atomic_helper_swap_state().
|
||||
*/
|
||||
|
||||
if ((ret = nv_drm_atomic_apply_modeset_config(
|
||||
dev,
|
||||
state, true /* commit */)) != 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Failed to apply atomic modeset. Error code: %d",
|
||||
ret);
|
||||
|
||||
goto done;
|
||||
}
|
||||
|
||||
nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) {
|
||||
struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
|
||||
struct nv_drm_crtc_state *nv_new_crtc_state =
|
||||
to_nv_crtc_state(crtc->state);
|
||||
|
||||
/*
|
||||
* If nv_drm_atomic_apply_modeset_config() hasn't consumed the flip
|
||||
* object, no event will be generated for this flip, and we need to process
|
||||
* it:
|
||||
*/
|
||||
|
||||
if (nv_new_crtc_state->nv_flip != NULL) {
|
||||
/*
|
||||
* First, defer processing of all pending flips for this crtc until
|
||||
* last flip in the queue has been processed. This is to ensure a
|
||||
* correct order in event delivery.
|
||||
*/
|
||||
spin_lock(&nv_crtc->flip_list_lock);
|
||||
if (!list_empty(&nv_crtc->flip_list)) {
|
||||
struct nv_drm_flip *nv_last_flip =
|
||||
list_last_entry(&nv_crtc->flip_list,
|
||||
struct nv_drm_flip, list_entry);
|
||||
|
||||
list_add(&nv_new_crtc_state->nv_flip->list_entry,
|
||||
&nv_last_flip->deferred_flip_list);
|
||||
|
||||
nv_new_crtc_state->nv_flip = NULL;
|
||||
}
|
||||
spin_unlock(&nv_crtc->flip_list_lock);
|
||||
}
|
||||
|
||||
if (nv_new_crtc_state->nv_flip != NULL) {
|
||||
/*
|
||||
* Then, if no more pending flips for this crtc, deliver event for the
|
||||
* current flip.
|
||||
*/
|
||||
if (nv_new_crtc_state->nv_flip->event != NULL) {
|
||||
spin_lock(&dev->event_lock);
|
||||
drm_crtc_send_vblank_event(crtc,
|
||||
nv_new_crtc_state->nv_flip->event);
|
||||
spin_unlock(&dev->event_lock);
|
||||
}
|
||||
|
||||
nv_drm_free(nv_new_crtc_state->nv_flip);
|
||||
nv_new_crtc_state->nv_flip = NULL;
|
||||
}
|
||||
|
||||
if (!nonblock) {
|
||||
/*
|
||||
* Here you aren't required to hold nv_drm_crtc::flip_list_lock
|
||||
* because:
|
||||
*
|
||||
* The core DRM driver acquires lock for all affected crtcs before
|
||||
* calling into ->commit() hook, therefore it is not possible for
|
||||
* other threads to call into ->commit() hook affecting same crtcs
|
||||
* and enqueue flip objects into flip_list -
|
||||
*
|
||||
* nv_drm_atomic_commit_internal()
|
||||
* |-> nv_drm_atomic_apply_modeset_config(commit=true)
|
||||
* |-> nv_drm_crtc_enqueue_flip()
|
||||
*
|
||||
* Only possibility is list_empty check races with code path
|
||||
* dequeuing flip object -
|
||||
*
|
||||
* __nv_drm_handle_flip_event()
|
||||
* |-> nv_drm_crtc_dequeue_flip()
|
||||
*
|
||||
* But this race condition can't cause list_empty() to return an
* incorrect result. nv_drm_crtc_dequeue_flip() in the middle of
|
||||
* updating the list could not trick us into thinking the list is
|
||||
* empty when it isn't.
|
||||
*/
|
||||
if (wait_event_timeout(
|
||||
nv_dev->flip_event_wq,
|
||||
list_empty(&nv_crtc->flip_list),
|
||||
3 * HZ /* 3 second */) == 0) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"Flip event timeout on head %u", nv_crtc->head);
|
||||
while (!list_empty(&nv_crtc->flip_list)) {
|
||||
__nv_drm_handle_flip_event(nv_crtc);
|
||||
}
|
||||
}
|
||||
|
||||
if (crtc_state->color_mgmt_changed) {
|
||||
NvBool complete = nvKms->checkLutNotifier(nv_dev->pDevice,
|
||||
nv_crtc->head,
|
||||
true /* waitForCompletion */);
|
||||
if (!complete) {
|
||||
NV_DRM_DEV_LOG_ERR(
|
||||
nv_dev,
|
||||
"LUT notifier timeout on head %u", nv_crtc->head);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
|
||||
/*
|
||||
* State will be freed when the caller drops its reference after we return.
|
||||
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev,
|
||||
NvU32 head, NvU32 plane)
|
||||
{
|
||||
struct nv_drm_crtc *nv_crtc = nv_drm_crtc_lookup(nv_dev, head);
|
||||
|
||||
if (NV_DRM_WARN(nv_crtc == NULL)) {
|
||||
return;
|
||||
}
|
||||
|
||||
__nv_drm_handle_flip_event(nv_crtc);
|
||||
}
|
||||
|
||||
#endif
|
||||
53
kernel-open/nvidia-drm/nvidia-drm-modeset.h
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_MODESET_H__
|
||||
#define __NVIDIA_DRM_MODESET_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
|
||||
|
||||
#include "nvkms-kapi.h"
|
||||
|
||||
struct drm_device;
|
||||
struct drm_atomic_state;
|
||||
|
||||
struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev);
|
||||
void nv_drm_atomic_state_clear(struct drm_atomic_state *state);
|
||||
void nv_drm_atomic_state_free(struct drm_atomic_state *state);
|
||||
|
||||
int nv_drm_atomic_check(struct drm_device *dev,
|
||||
struct drm_atomic_state *state);
|
||||
|
||||
int nv_drm_atomic_commit(struct drm_device *dev,
|
||||
struct drm_atomic_state *state, bool nonblock);
|
||||
|
||||
|
||||
void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev,
|
||||
NvU32 head, NvU32 plane);
|
||||
|
||||
int nv_drm_shut_down_all_crtcs(struct drm_device *dev);
|
||||
|
||||
#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
|
||||
|
||||
#endif /* __NVIDIA_DRM_MODESET_H__ */
|
||||
257
kernel-open/nvidia-drm/nvidia-drm-os-interface.c
Normal file
@@ -0,0 +1,257 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2025, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "nvidia-drm-os-interface.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#include <linux/file.h>
|
||||
#include <linux/sync_file.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
#include "nv-mm.h"
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
bool nv_drm_modeset_module_param = false;
|
||||
bool nv_drm_fbdev_module_param = true;
|
||||
|
||||
void *nv_drm_calloc(size_t nmemb, size_t size)
|
||||
{
|
||||
size_t total_size = nmemb * size;
|
||||
//
|
||||
// Check for overflow.
|
||||
//
|
||||
if ((nmemb != 0) && ((total_size / nmemb) != size))
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
return kzalloc(total_size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
void nv_drm_free(void *ptr)
|
||||
{
|
||||
if (IS_ERR(ptr)) {
|
||||
return;
|
||||
}
|
||||
|
||||
kfree(ptr);
|
||||
}
|
||||
|
||||
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
|
||||
#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory")
|
||||
#elif defined(NVCPU_PPC64LE)
|
||||
#define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory")
|
||||
#else
|
||||
#define WRITE_COMBINE_FLUSH() mb()
|
||||
#endif
|
||||
|
||||
void nv_drm_write_combine_flush(void)
|
||||
{
|
||||
WRITE_COMBINE_FLUSH();
|
||||
}
|
||||
|
||||
int nv_drm_lock_user_pages(unsigned long address,
|
||||
unsigned long pages_count, struct page ***pages)
|
||||
{
|
||||
struct mm_struct *mm = current->mm;
|
||||
struct page **user_pages;
|
||||
int pages_pinned;
|
||||
|
||||
user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages));
|
||||
|
||||
if (user_pages == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
nv_mmap_read_lock(mm);
|
||||
|
||||
pages_pinned = NV_PIN_USER_PAGES(address, pages_count, FOLL_WRITE,
|
||||
user_pages);
|
||||
nv_mmap_read_unlock(mm);
|
||||
|
||||
if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
|
||||
goto failed;
|
||||
}
|
||||
|
||||
*pages = user_pages;
|
||||
|
||||
return 0;
|
||||
|
||||
failed:
|
||||
|
||||
if (pages_pinned > 0) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pages_pinned; i++) {
|
||||
NV_UNPIN_USER_PAGE(user_pages[i]);
|
||||
}
|
||||
}
|
||||
|
||||
nv_drm_free(user_pages);
|
||||
|
||||
return (pages_pinned < 0) ? pages_pinned : -EINVAL;
|
||||
}
|
||||
|
||||
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages)
|
||||
{
|
||||
unsigned long i;
|
||||
|
||||
for (i = 0; i < pages_count; i++) {
|
||||
set_page_dirty_lock(pages[i]);
|
||||
NV_UNPIN_USER_PAGE(pages[i]);
|
||||
}
|
||||
|
||||
nv_drm_free(pages);
|
||||
}
|
||||
|
||||
/*
|
||||
* linuxkpi vmap doesn't use the flags argument as it
|
||||
* doesn't seem to be needed. Define VM_USERMAP to 0
|
||||
* to make errors go away
|
||||
*
|
||||
* vmap: sys/compat/linuxkpi/common/src/linux_compat.c
|
||||
*/
|
||||
#if defined(NV_BSD)
|
||||
#define VM_USERMAP 0
|
||||
#endif
|
||||
|
||||
void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached)
|
||||
{
|
||||
pgprot_t prot = PAGE_KERNEL;
|
||||
|
||||
if (!cached) {
|
||||
prot = pgprot_noncached(PAGE_KERNEL);
|
||||
}
|
||||
|
||||
return vmap(pages, pages_count, VM_USERMAP, prot);
|
||||
}
|
||||
|
||||
void nv_drm_vunmap(void *address)
|
||||
{
|
||||
vunmap(address);
|
||||
}
|
||||
|
||||
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name)
|
||||
{
|
||||
worker->shutting_down = false;
|
||||
if (nv_kthread_q_init(&worker->q, name)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
spin_lock_init(&worker->lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void nv_drm_workthread_shutdown(nv_drm_workthread *worker)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&worker->lock, flags);
|
||||
worker->shutting_down = true;
|
||||
spin_unlock_irqrestore(&worker->lock, flags);
|
||||
|
||||
nv_kthread_q_stop(&worker->q);
|
||||
}
|
||||
|
||||
void nv_drm_workthread_work_init(nv_drm_work *work,
|
||||
void (*callback)(void *),
|
||||
void *arg)
|
||||
{
|
||||
nv_kthread_q_item_init(work, callback, arg);
|
||||
}
|
||||
|
||||
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work)
|
||||
{
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock_irqsave(&worker->lock, flags);
|
||||
if (!worker->shutting_down) {
|
||||
ret = nv_kthread_q_schedule_q_item(&worker->q, work);
|
||||
}
|
||||
spin_unlock_irqrestore(&worker->lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void nv_drm_timer_setup(nv_drm_timer *timer, void (*callback)(nv_drm_timer *nv_drm_timer))
|
||||
{
|
||||
nv_timer_setup(timer, callback);
|
||||
}
|
||||
|
||||
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long timeout_native)
|
||||
{
|
||||
mod_timer(&timer->kernel_timer, timeout_native);
|
||||
}
|
||||
|
||||
unsigned long nv_drm_timer_now(void)
|
||||
{
|
||||
return jiffies;
|
||||
}
|
||||
|
||||
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms)
|
||||
{
|
||||
return jiffies + msecs_to_jiffies(relative_timeout_ms);
|
||||
}
|
||||
|
||||
int nv_drm_create_sync_file(struct dma_fence *fence)
|
||||
{
|
||||
struct sync_file *sync;
|
||||
int fd = get_unused_fd_flags(O_CLOEXEC);
|
||||
|
||||
if (fd < 0) {
|
||||
return fd;
|
||||
}
|
||||
|
||||
/* sync_file_create() generates its own reference to the fence */
|
||||
sync = sync_file_create(fence);
|
||||
|
||||
if (IS_ERR(sync)) {
|
||||
put_unused_fd(fd);
|
||||
return PTR_ERR(sync);
|
||||
}
|
||||
|
||||
fd_install(fd, sync->file);
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
struct dma_fence *nv_drm_sync_file_get_fence(int fd)
|
||||
{
|
||||
return sync_file_get_fence(fd);
|
||||
}
|
||||
|
||||
void nv_drm_yield(void)
|
||||
{
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
schedule_timeout(1);
|
||||
}
|
||||
|
||||
#endif /* NV_DRM_AVAILABLE */
|
||||
112
kernel-open/nvidia-drm/nvidia-drm-os-interface.h
Normal file
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Copyright (c) 2015-2025, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __NVIDIA_DRM_OS_INTERFACE_H__
|
||||
#define __NVIDIA_DRM_OS_INTERFACE_H__
|
||||
|
||||
#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */
|
||||
|
||||
#include "nvtypes.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#include "linux/dma-fence.h"
|
||||
|
||||
#if defined(NV_LINUX) || defined(NV_BSD)
|
||||
#include "nv-kthread-q.h"
|
||||
#include "linux/spinlock.h"
|
||||
|
||||
typedef struct nv_drm_workthread {
|
||||
spinlock_t lock;
|
||||
struct nv_kthread_q q;
|
||||
bool shutting_down;
|
||||
} nv_drm_workthread;
|
||||
|
||||
typedef nv_kthread_q_item_t nv_drm_work;
|
||||
|
||||
#else
|
||||
#error "Need to define deferred work primitives for this OS"
|
||||
#endif
|
||||
|
||||
#if defined(NV_LINUX) || defined(NV_BSD)
|
||||
#include "nv-timer.h"
|
||||
|
||||
typedef struct nv_timer nv_drm_timer;
|
||||
|
||||
#else
|
||||
#error "Need to define kernel timer callback primitives for this OS"
|
||||
#endif
|
||||
|
||||
struct page;
|
||||
|
||||
/* Set to true when the atomic modeset feature is enabled. */
|
||||
extern bool nv_drm_modeset_module_param;
|
||||
#if defined(NV_DRM_FBDEV_AVAILABLE)
|
||||
/* Set to true when the nvidia-drm driver should install a framebuffer device */
|
||||
extern bool nv_drm_fbdev_module_param;
|
||||
#endif
|
||||
|
||||
void *nv_drm_calloc(size_t nmemb, size_t size);
|
||||
|
||||
void nv_drm_free(void *ptr);
|
||||
|
||||
void nv_drm_write_combine_flush(void);
|
||||
|
||||
int nv_drm_lock_user_pages(unsigned long address,
|
||||
unsigned long pages_count, struct page ***pages);
|
||||
|
||||
void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages);
|
||||
|
||||
void *nv_drm_vmap(struct page **pages, unsigned long pages_count, bool cached);
|
||||
|
||||
void nv_drm_vunmap(void *address);
|
||||
|
||||
bool nv_drm_workthread_init(nv_drm_workthread *worker, const char *name);
|
||||
|
||||
/* Can be called concurrently with nv_drm_workthread_add_work() */
|
||||
void nv_drm_workthread_shutdown(nv_drm_workthread *worker);
|
||||
|
||||
void nv_drm_workthread_work_init(nv_drm_work *work,
|
||||
void (*callback)(void *),
|
||||
void *arg);
|
||||
|
||||
/* Can be called concurrently with nv_drm_workthread_shutdown() */
|
||||
int nv_drm_workthread_add_work(nv_drm_workthread *worker, nv_drm_work *work);
|
||||
|
||||
void nv_drm_timer_setup(nv_drm_timer *timer,
|
||||
void (*callback)(nv_drm_timer *nv_drm_timer));
|
||||
|
||||
void nv_drm_mod_timer(nv_drm_timer *timer, unsigned long relative_timeout_ms);
|
||||
|
||||
unsigned long nv_drm_timer_now(void);
|
||||
|
||||
unsigned long nv_drm_timeout_from_ms(NvU64 relative_timeout_ms);
|
||||
|
||||
int nv_drm_create_sync_file(struct dma_fence *fence);
|
||||
|
||||
struct dma_fence *nv_drm_sync_file_get_fence(int fd);
|
||||
|
||||
void nv_drm_yield(void);
|
||||
|
||||
#endif /* defined(NV_DRM_AVAILABLE) */
|
||||
|
||||
#endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */
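As a rough sketch only (the context structure and all "example_" names below are made up for illustration), the deferred-work helpers declared above are intended to be used together roughly as follows:

/* Illustrative only -- not part of the driver sources. */
#include "nvidia-drm-os-interface.h"

struct example_ctx {
    nv_drm_workthread worker;
    nv_drm_work work;
};

static void example_work_callback(void *arg)
{
    struct example_ctx *ctx = arg;
    (void)ctx; /* Deferred work runs here, on the worker's kthread. */
}

static bool example_start(struct example_ctx *ctx)
{
    if (!nv_drm_workthread_init(&ctx->worker, "nv_drm_example")) {
        return false;
    }

    nv_drm_workthread_work_init(&ctx->work, example_work_callback, ctx);

    /* Nonzero means the item was queued; 0 means it was not (for example,
     * because the worker is already shutting down). */
    return nv_drm_workthread_add_work(&ctx->worker, &ctx->work) != 0;
}

static void example_stop(struct example_ctx *ctx)
{
    /* Safe to call concurrently with example_start()'s add_work. */
    nv_drm_workthread_shutdown(&ctx->worker);
}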
|
||||
211
kernel-open/nvidia-drm/nvidia-drm-priv.h
Normal file
@@ -0,0 +1,211 @@
/*
 * Copyright (c) 2015-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVIDIA_DRM_PRIV_H__
#define __NVIDIA_DRM_PRIV_H__

#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */

#if defined(NV_DRM_AVAILABLE)

#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif

#include <drm/drm_device.h>
#include <drm/drm_gem.h>

#include "nvidia-drm-os-interface.h"

#include "nvkms-kapi.h"

#define NV_DRM_LOG_ERR(__fmt, ...) \
    DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)

/*
 * DRM_WARN() was added in v4.9 by kernel commit
 * 30b0da8d556e65ff935a56cd82c05ba0516d3e4a
 *
 * Before this commit, only DRM_INFO and DRM_ERROR were defined and
 * DRM_INFO(fmt, ...) was defined as
 * printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__). So, if
 * DRM_WARN is undefined this defines NV_DRM_LOG_WARN following the
 * same pattern as DRM_INFO.
 */
#ifdef DRM_WARN
#define NV_DRM_LOG_WARN(__fmt, ...) \
    DRM_WARN("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#else
#define NV_DRM_LOG_WARN(__fmt, ...) \
    printk(KERN_WARNING "[" DRM_NAME "] [nvidia-drm] " __fmt "\n", ##__VA_ARGS__)
#endif

#define NV_DRM_LOG_INFO(__fmt, ...) \
    DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)

#define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \
    NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)

#define NV_DRM_DEV_LOG_WARN(__dev, __fmt, ...) \
    NV_DRM_LOG_WARN("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)

#define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \
    NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__)

#define NV_DRM_WARN(__condition) WARN_ON((__condition))

#define NV_DRM_DEBUG_DRIVER(__fmt, ...) \
    DRM_DEBUG_DRIVER("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__)

#define NV_DRM_DEV_DEBUG_DRIVER(__dev, __fmt, ...) \
    DRM_DEBUG_DRIVER("[GPU ID 0x%08x] " __fmt, \
                     __dev->gpu_info.gpu_id, ##__VA_ARGS__)

enum nv_drm_input_color_space {
    NV_DRM_INPUT_COLOR_SPACE_NONE,
    NV_DRM_INPUT_COLOR_SPACE_SCRGB_LINEAR,
    NV_DRM_INPUT_COLOR_SPACE_BT2100_PQ
};

struct nv_drm_device {
    nv_gpu_info_t gpu_info;
    MIGDeviceId gpu_mig_device;

    struct drm_device *dev;

    struct NvKmsKapiDevice *pDevice;

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
    /*
     * Lock to protect drm-subsystem and fields of this structure
     * from concurrent access.
     *
     * Do not hold this lock if some lock from core drm-subsystem
     * is already held, locking order should be like this -
     *
     * mutex_lock(nv_drm_device::lock);
     * ....
     * mutex_lock(drm_device::mode_config::lock);
     * ....
     * .......
     * mutex_unlock(drm_device::mode_config::lock);
     * ........
     * ..
     * mutex_lock(drm_device::struct_mutex);
     * ....
     * ........
     * mutex_unlock(drm_device::struct_mutex);
     * ..
     * mutex_unlock(nv_drm_device::lock);
     */
    struct mutex lock;

    NvU32 pitchAlignment;

    NvU8 genericPageKind;
    NvU8 pageKindGeneration;
    NvU8 sectorLayout;
    NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];

    struct delayed_work hotplug_event_work;
    atomic_t enable_event_handling;

    /**
     * @flip_event_wq:
     *
     * The wait queue on which nv_drm_atomic_commit_internal() sleeps until
     * next flip event occurs.
     */
    wait_queue_head_t flip_event_wq;

#endif

    NvU64 semsurf_stride;
    NvU64 semsurf_max_submitted_offset;

    NvBool hasVideoMemory;

    NvBool supportsSyncpts;
    NvBool requiresVrrSemaphores;
    NvBool subOwnershipGranted;
    NvBool hasFramebufferConsole;

    struct drm_property *nv_out_fence_property;
    struct drm_property *nv_input_colorspace_property;

    struct {
        NvU32 count;
        NvU32 next_index;
    } display_semaphores;

#if defined(NV_DRM_HAS_HDR_OUTPUT_METADATA)
    struct drm_property *nv_hdr_output_metadata_property;
#endif

    struct drm_property *nv_plane_lms_ctm_property;
    struct drm_property *nv_plane_lms_to_itp_ctm_property;
    struct drm_property *nv_plane_itp_to_lms_ctm_property;
    struct drm_property *nv_plane_blend_ctm_property;

    struct drm_property *nv_plane_degamma_tf_property;
    struct drm_property *nv_plane_degamma_lut_property;
    struct drm_property *nv_plane_degamma_lut_size_property;
    struct drm_property *nv_plane_degamma_multiplier_property;

    struct drm_property *nv_plane_tmo_lut_property;
    struct drm_property *nv_plane_tmo_lut_size_property;

    struct drm_property *nv_crtc_regamma_tf_property;
    struct drm_property *nv_crtc_regamma_lut_property;
    struct drm_property *nv_crtc_regamma_lut_size_property;
    struct drm_property *nv_crtc_regamma_divisor_property;

    struct nv_drm_device *next;

    NvU64 vtFbBaseAddress;
    NvU64 vtFbSize;
};

static inline NvU32 nv_drm_next_display_semaphore(
    struct nv_drm_device *nv_dev)
{
    NvU32 current_index = nv_dev->display_semaphores.next_index++;

    if (nv_dev->display_semaphores.next_index >=
        nv_dev->display_semaphores.count) {
        nv_dev->display_semaphores.next_index = 0;
    }

    return current_index;
}

static inline struct nv_drm_device *to_nv_device(
    struct drm_device *dev)
{
    return dev->dev_private;
}

extern const struct NvKmsKapiFunctionsTable* const nvKms;

#endif /* defined(NV_DRM_AVAILABLE) */

#endif /* __NVIDIA_DRM_PRIV_H__ */
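The macros and inline helpers above are what the rest of the nvidia-drm sources build on. As an illustration only (this function is hypothetical and not part of the commit), a driver routine would typically recover the per-GPU nv_drm_device with to_nv_device(), log through the NV_DRM_DEV_* wrappers so every message carries the GPU ID, and hand out display-semaphore indices round-robin via nv_drm_next_display_semaphore():

static void nv_drm_priv_usage_sketch(struct drm_device *dev)
{
    /* dev_private is set to the nv_drm_device when the DRM device is created. */
    struct nv_drm_device *nv_dev = to_nv_device(dev);

    NV_DRM_DEV_LOG_INFO(nv_dev, "has video memory: %s",
                        nv_dev->hasVideoMemory ? "yes" : "no");

    /* Wraps next_index back to 0 once display_semaphores.count is reached. */
    NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "next display semaphore index: %u",
                            nv_drm_next_display_semaphore(nv_dev));
}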
111  kernel-open/nvidia-drm/nvidia-drm-sources.mk  Normal file
@@ -0,0 +1,111 @@
###########################################################################
# Kbuild fragment for nvidia-drm.ko
###########################################################################

#
# Define NVIDIA_DRM_SOURCES
#

NVIDIA_DRM_SOURCES =
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fence.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-kthread-q.c
NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-os-interface.c

#
# Register the conftests needed by nvidia-drm.ko
#

NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test
NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_fd_to_handle
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_drm_gem_prime_handle_to_fd
NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_present_timer_delete_sync

NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages_remote
NV_CONFTEST_FUNCTION_COMPILE_TESTS += pin_user_pages
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_has_vrr_capable_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked
NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack
NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers
NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_devices
NV_CONFTEST_FUNCTION_COMPILE_TESTS += aperture_remove_conflicting_pci_devices
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_generic_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_fbdev_ttm_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_client_setup
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_attach_hdr_output_metadata_property
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_plane_create_color_properties
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_legacy_gamma_set
NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_mixed
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_prime_mmap
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_sysfs_connector_property_event
NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_sysfs_connector_status_event

NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_vrr_enabled
NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_has_hdr_output_metadata
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_add_fence
NV_CONFTEST_TYPE_COMPILE_TESTS += dma_resv_reserve_fences
NV_CONFTEST_TYPE_COMPILE_TESTS += reservation_object_reserve_shared_has_num_fences_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_has_override_edid
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_file_get_master
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_modeset_lock_all_end
NV_CONFTEST_TYPE_COMPILE_TESTS += vm_area_struct_has_const_vm_flags
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_dumb_destroy
NV_CONFTEST_TYPE_COMPILE_TESTS += fence_ops_use_64bit_seqno
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_framebuffers_has_no_primary_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_aperture_remove_conflicting_pci_framebuffers_has_driver_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_create_dp_colorspace_property_has_supported_colorspaces_arg
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_syncobj_features_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_unlocked_ioctl_flag_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_color_ctm_3x4_present
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_info_has_is_yuv
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_mmap
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_output_poll_changed
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_date
NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_helper_funcs_mode_valid_has_const_mode_arg
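Each entry above names a compile test run by kernel-open/conftest.sh against the target kernel; the result appears to be surfaced to the C sources as an NV_* preprocessor define pulled in through nvidia-drm-conftest.h (for example, drm_available corresponds to NV_DRM_AVAILABLE). A minimal sketch of how one of these results is consumed follows; the guarded comments are placeholders, and nvidia-drm-utils.c below shows the real use of this particular test:

#include "nvidia-drm-conftest.h"

#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH)
/* Older kernels: struct drm_display_mode still carries a vrefresh field. */
#else
/* Newer kernels: the refresh rate is derived with drm_mode_vrefresh(). */
#endif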
228  kernel-open/nvidia-drm/nvidia-drm-utils.c  Normal file
@@ -0,0 +1,228 @@
/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

#if defined(NV_DRM_DRMP_H_PRESENT)
#include <drm/drmP.h>
#endif

#include <drm/drm_plane.h>
#include <drm/drm_modes.h>
#include <uapi/drm/drm_fourcc.h>

#include "nvidia-drm-priv.h"
#include "nvidia-drm-utils.h"

struct NvKmsKapiConnectorInfo*
nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice,
                         NvKmsKapiConnector hConnector)
{
    struct NvKmsKapiConnectorInfo *connectorInfo =
        nv_drm_calloc(1, sizeof(*connectorInfo));

    if (connectorInfo == NULL) {
        return ERR_PTR(-ENOMEM);
    }

    if (!nvKms->getConnectorInfo(pDevice, hConnector, connectorInfo)) {
        nv_drm_free(connectorInfo);

        return ERR_PTR(-EINVAL);
    }

    return connectorInfo;
}

int
nvkms_connector_signal_to_drm_encoder_signal(NvKmsConnectorSignalFormat format)
{
    switch (format) {
        default:
        case NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN:
            return DRM_MODE_ENCODER_NONE;
        case NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS:
        case NVKMS_CONNECTOR_SIGNAL_FORMAT_DP:
            return DRM_MODE_ENCODER_TMDS;
        case NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS:
            return DRM_MODE_ENCODER_LVDS;
        case NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA:
            return DRM_MODE_ENCODER_DAC;
        case NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI:
            return DRM_MODE_ENCODER_DSI;
    }
}

int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type,
                                               NvBool internal)
{
    switch (type) {
        default:
        case NVKMS_CONNECTOR_TYPE_UNKNOWN:
            return DRM_MODE_CONNECTOR_Unknown;
        case NVKMS_CONNECTOR_TYPE_DP:
            return
                internal ?
                DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort;
        case NVKMS_CONNECTOR_TYPE_HDMI:
            return DRM_MODE_CONNECTOR_HDMIA;
        case NVKMS_CONNECTOR_TYPE_DVI_D:
            return DRM_MODE_CONNECTOR_DVID;
        case NVKMS_CONNECTOR_TYPE_DVI_I:
            return DRM_MODE_CONNECTOR_DVII;
        case NVKMS_CONNECTOR_TYPE_LVDS:
            return DRM_MODE_CONNECTOR_LVDS;
        case NVKMS_CONNECTOR_TYPE_VGA:
            return DRM_MODE_CONNECTOR_VGA;
        case NVKMS_CONNECTOR_TYPE_DSI:
            return DRM_MODE_CONNECTOR_DSI;
        case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER:
            return DRM_MODE_CONNECTOR_DisplayPort;
    }
}

void
nvkms_display_mode_to_drm_mode(const struct NvKmsKapiDisplayMode *displayMode,
                               struct drm_display_mode *mode)
{
#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH)
    mode->vrefresh = (displayMode->timings.refreshRate + 500) / 1000; /* In Hz */
#endif

    mode->clock = (displayMode->timings.pixelClockHz + 500) / 1000; /* In KHz */

    mode->hdisplay = displayMode->timings.hVisible;
    mode->hsync_start = displayMode->timings.hSyncStart;
    mode->hsync_end = displayMode->timings.hSyncEnd;
    mode->htotal = displayMode->timings.hTotal;
    mode->hskew = displayMode->timings.hSkew;

    mode->vdisplay = displayMode->timings.vVisible;
    mode->vsync_start = displayMode->timings.vSyncStart;
    mode->vsync_end = displayMode->timings.vSyncEnd;
    mode->vtotal = displayMode->timings.vTotal;

    if (displayMode->timings.flags.interlaced) {
        mode->flags |= DRM_MODE_FLAG_INTERLACE;
    }

    if (displayMode->timings.flags.doubleScan) {
        mode->flags |= DRM_MODE_FLAG_DBLSCAN;
    }

    if (displayMode->timings.flags.hSyncPos) {
        mode->flags |= DRM_MODE_FLAG_PHSYNC;
    }

    if (displayMode->timings.flags.hSyncNeg) {
        mode->flags |= DRM_MODE_FLAG_NHSYNC;
    }

    if (displayMode->timings.flags.vSyncPos) {
        mode->flags |= DRM_MODE_FLAG_PVSYNC;
    }

    if (displayMode->timings.flags.vSyncNeg) {
        mode->flags |= DRM_MODE_FLAG_NVSYNC;
    }

    mode->width_mm = displayMode->timings.widthMM;
    mode->height_mm = displayMode->timings.heightMM;

    if (strlen(displayMode->name) != 0) {
        memcpy(
            mode->name, displayMode->name,
            min(sizeof(mode->name), sizeof(displayMode->name)));

        mode->name[sizeof(mode->name) - 1] = '\0';
    } else {
        drm_mode_set_name(mode);
    }
}

void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src,
                                    struct NvKmsKapiDisplayMode *dst)
{
#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH)
    dst->timings.refreshRate = src->vrefresh * 1000;
#else
    dst->timings.refreshRate = drm_mode_vrefresh(src) * 1000;
#endif

    dst->timings.pixelClockHz = src->clock * 1000; /* In Hz */

    dst->timings.hVisible = src->hdisplay;
    dst->timings.hSyncStart = src->hsync_start;
    dst->timings.hSyncEnd = src->hsync_end;
    dst->timings.hTotal = src->htotal;
    dst->timings.hSkew = src->hskew;

    dst->timings.vVisible = src->vdisplay;
    dst->timings.vSyncStart = src->vsync_start;
    dst->timings.vSyncEnd = src->vsync_end;
    dst->timings.vTotal = src->vtotal;

    if (src->flags & DRM_MODE_FLAG_INTERLACE) {
        dst->timings.flags.interlaced = NV_TRUE;
    } else {
        dst->timings.flags.interlaced = NV_FALSE;
    }

    if (src->flags & DRM_MODE_FLAG_DBLSCAN) {
        dst->timings.flags.doubleScan = NV_TRUE;
    } else {
        dst->timings.flags.doubleScan = NV_FALSE;
    }

    if (src->flags & DRM_MODE_FLAG_PHSYNC) {
        dst->timings.flags.hSyncPos = NV_TRUE;
    } else {
        dst->timings.flags.hSyncPos = NV_FALSE;
    }

    if (src->flags & DRM_MODE_FLAG_NHSYNC) {
        dst->timings.flags.hSyncNeg = NV_TRUE;
    } else {
        dst->timings.flags.hSyncNeg = NV_FALSE;
    }

    if (src->flags & DRM_MODE_FLAG_PVSYNC) {
        dst->timings.flags.vSyncPos = NV_TRUE;
    } else {
        dst->timings.flags.vSyncPos = NV_FALSE;
    }

    if (src->flags & DRM_MODE_FLAG_NVSYNC) {
        dst->timings.flags.vSyncNeg = NV_TRUE;
    } else {
        dst->timings.flags.vSyncNeg = NV_FALSE;
    }

    dst->timings.widthMM = src->width_mm;
    dst->timings.heightMM = src->height_mm;

    memcpy(dst->name, src->name, min(sizeof(dst->name), sizeof(src->name)));
}

#endif
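The two converters above are largely inverses over the timing fields that both representations carry. Purely as an illustration (this helper is hypothetical and not part of the commit), a caller could sanity-check that a DRM mode survives the round trip through the NVKMS representation:

#include <drm/drm_modes.h>

#include "nvidia-drm-utils.h"
#include "nvkms-kapi.h"

static NvBool nv_drm_mode_round_trips(const struct drm_display_mode *mode)
{
    struct NvKmsKapiDisplayMode kapi_mode = { };
    struct drm_display_mode drm_mode = { };

    drm_mode_to_nvkms_display_mode(mode, &kapi_mode);      /* DRM -> NVKMS */
    nvkms_display_mode_to_drm_mode(&kapi_mode, &drm_mode); /* NVKMS -> DRM */

    /* Timings the NVKMS representation cannot express would show up here. */
    return drm_mode.hdisplay == mode->hdisplay &&
           drm_mode.vdisplay == mode->vdisplay &&
           drm_mode.clock == mode->clock;
}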
54  kernel-open/nvidia-drm/nvidia-drm-utils.h  Normal file
@@ -0,0 +1,54 @@
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVIDIA_DRM_UTILS_H__
#define __NVIDIA_DRM_UTILS_H__

#include "nvidia-drm-conftest.h"

#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)

#include "nvkms-kapi.h"

enum drm_plane_type;
struct drm_display_mode;

struct NvKmsKapiConnectorInfo*
nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice,
                         NvKmsKapiConnector hConnector);

int nvkms_connector_signal_to_drm_encoder_signal(
    NvKmsConnectorSignalFormat format);

int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type,
                                               NvBool internal);

void nvkms_display_mode_to_drm_mode(
    const struct NvKmsKapiDisplayMode *displayMode,
    struct drm_display_mode *mode);

void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src,
                                    struct NvKmsKapiDisplayMode *dst);

#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */

#endif /* __NVIDIA_DRM_UTILS_H__ */
33  kernel-open/nvidia-drm/nvidia-drm.Kbuild  Normal file
@@ -0,0 +1,33 @@
###########################################################################
# Kbuild fragment for nvidia-drm.ko
###########################################################################

# Get our source file list and conftest list from the common file
include $(src)/nvidia-drm/nvidia-drm-sources.mk

# Linux-specific sources
NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c

#
# Define NVIDIA_DRM_{SOURCES,OBJECTS}
#

NVIDIA_DRM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_DRM_SOURCES))

obj-m += nvidia-drm.o
nvidia-drm-y := $(NVIDIA_DRM_OBJECTS)

NVIDIA_DRM_KO = nvidia-drm/nvidia-drm.ko

NV_KERNEL_MODULE_TARGETS += $(NVIDIA_DRM_KO)

#
# Define nvidia-drm.ko-specific CFLAGS.
#

NVIDIA_DRM_CFLAGS += -I$(src)/nvidia-drm
NVIDIA_DRM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0

$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_DRM_OBJECTS), $(NVIDIA_DRM_CFLAGS))

NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_DRM_OBJECTS)
61  kernel-open/nvidia-drm/nvidia-drm.c  Normal file
@@ -0,0 +1,61 @@
/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvidia-drm.h"

#if defined(NV_DRM_AVAILABLE)

#include "nvidia-drm-priv.h"
#include "nvidia-drm-drv.h"

static struct NvKmsKapiFunctionsTable nvKmsFuncsTable = {
    .versionString = NV_VERSION_STRING,
};

const struct NvKmsKapiFunctionsTable* const nvKms = &nvKmsFuncsTable;

#endif

int nv_drm_init(void)
{
#if defined(NV_DRM_AVAILABLE)
    if (!nvKmsKapiGetFunctionsTable(&nvKmsFuncsTable)) {
        NV_DRM_LOG_ERR(
            "Version mismatch: nvidia-modeset.ko(%s) nvidia-drm.ko(%s)",
            nvKmsFuncsTable.versionString, NV_VERSION_STRING);
        return -EINVAL;
    }

    nvKms->setSuspendResumeCallback(nv_drm_suspend_resume);
    return nv_drm_probe_devices();
#else
    return 0;
#endif
}

void nv_drm_exit(void)
{
#if defined(NV_DRM_AVAILABLE)
    nvKms->setSuspendResumeCallback(NULL);
    nv_drm_remove_devices();
#endif
}
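nv_drm_init() and nv_drm_exit() are the OS-independent entry points; the actual module_init()/module_exit() wiring lives in nvidia-drm-linux.c, which nvidia-drm.Kbuild adds to the build but which is not part of this excerpt. A rough sketch of what such a wrapper could look like (the wrapper names here are illustrative, not necessarily the driver's actual ones):

#include <linux/module.h>

#include "nvidia-drm.h"

static int __init nv_linux_drm_init(void)
{
    /* Fails with -EINVAL if nvidia-modeset.ko reports a mismatched version. */
    return nv_drm_init();
}

static void __exit nv_linux_drm_exit(void)
{
    nv_drm_exit();
}

module_init(nv_linux_drm_init);
module_exit(nv_linux_drm_exit);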
31  kernel-open/nvidia-drm/nvidia-drm.h  Normal file
@@ -0,0 +1,31 @@
/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVIDIA_DRM_H__
#define __NVIDIA_DRM_H__

#include "nvidia-drm-conftest.h"

int nv_drm_init(void);
void nv_drm_exit(void);

#endif /* __NVIDIA_DRM_H__ */